Nov 22 10:37:45 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 22 10:37:45 crc restorecon[4673]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 22 10:37:45 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 10:37:46 crc restorecon[4673]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:37:46 crc 
restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:37:46 crc 
restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:37:46 crc restorecon[4673]: the following files under /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ likewise not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16:
  69105f4f.0
  GlobalSign.1.pem
  0b9bc432.0
  Certum_Trusted_Network_CA_2.pem
  GTS_Root_R3.pem
  32888f65.0
  CommScope_Public_Trust_ECC_Root-01.pem
  6b03dec0.0
  219d9499.0
  CommScope_Public_Trust_ECC_Root-02.pem
  5acf816d.0
  cbf06781.0
  CommScope_Public_Trust_RSA_Root-01.pem
  GTS_Root_R4.pem
  dc99f41e.0
  CommScope_Public_Trust_RSA_Root-02.pem
  GlobalSign.3.pem
  AAA_Certificate_Services.pem
  985c1f52.0
  8794b4e3.0
  D-TRUST_BR_Root_CA_1_2020.pem
  e7c037b4.0
  ef954a4e.0
  D-TRUST_EV_Root_CA_1_2020.pem
  2add47b6.0
  90c5a3c8.0
  D-TRUST_Root_Class_3_CA_2_2009.pem
  b0f3e76e.0
  53a1b57a.0
  D-TRUST_Root_Class_3_CA_2_EV_2009.pem
  GlobalSign_Root_CA.pem
  DigiCert_Assured_ID_Root_CA.pem
  5ad8a5d6.0
  68dd7389.0
  DigiCert_Assured_ID_Root_G2.pem
  9d04f354.0
  8d6437c3.0
  062cdee6.0
  bd43e1dd.0
  DigiCert_Assured_ID_Root_G3.pem
  7f3d5d1d.0
  c491639e.0
  GlobalSign_Root_E46.pem
  DigiCert_Global_Root_CA.pem
  3513523f.0
  399e7759.0
  feffd413.0
  d18e9066.0
  DigiCert_Global_Root_G2.pem
  607986c7.0
  c90bc37d.0
  1b0f7e5c.0
  1e08bfd1.0
  DigiCert_Global_Root_G3.pem
  dd8e9d41.0
  ed39abd0.0
  a3418fda.0
  bc3f2570.0
  DigiCert_High_Assurance_EV_Root_CA.pem
  244b5494.0
  81b9768f.0
  GlobalSign.2.pem
  4be590e0.0
  DigiCert_TLS_ECC_P384_Root_G5.pem
  9846683b.0
  252252d2.0
  1e8e7201.0
  ISRG_Root_X1.pem
  DigiCert_TLS_RSA4096_Root_G5.pem
  d52c538d.0
  c44cc0c0.0
  GlobalSign_Root_R46.pem
  DigiCert_Trusted_Root_G4.pem
  75d1b2ed.0
  a2c66da8.0
  GTS_Root_R2.pem
  ecccd8db.0
  Entrust.net_Certification_Authority__2048_.pem
  aee5f10d.0
  3e7271e8.0
  b0e59380.0
  4c3982f2.0
  Entrust_Root_Certification_Authority.pem
  6b99d060.0
  bf64f35b.0
  0a775a30.0
  002c0b4f.0
  cc450945.0
  Entrust_Root_Certification_Authority_-_EC1.pem
  106f3e4d.0
  b3fb433b.0
  GlobalSign.pem
  4042bcee.0
  Entrust_Root_Certification_Authority_-_G2.pem
  02265526.0
  455f1b52.0
  0d69c7e1.0
  9f727ac7.0
  Entrust_Root_Certification_Authority_-_G4.pem
  5e98733a.0
  f0cd152c.0
  dc4d6a89.0
  6187b673.0
  FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem
  ba8887ce.0
  068570d1.0
  f081611a.0
  48a195d8.0
  GDCA_TrustAUTH_R5_ROOT.pem
  0f6fa695.0
  ab59055e.0
  b92fd57f.0
  GLOBALTRUST_2020.pem
  fa5da96b.0
  1ec40989.0
  7719f463.0
  GTS_Root_R1.pem
  1001acf7.0
  f013ecaf.0
  626dceaf.0
  c559d742.0
  1d3472b9.0
  9479c8c3.0
  a81e292b.0
  4bfab552.0
  Go_Daddy_Class_2_Certification_Authority.pem
  Sectigo_Public_Server_Authentication_Root_E46.pem
  Go_Daddy_Root_Certificate_Authority_-_G2.pem
  e071171e.0
  57bcb2da.0
  HARICA_TLS_ECC_Root_CA_2021.pem
  ab5346f4.0
  5046c355.0
  HARICA_TLS_RSA_Root_CA_2021.pem
  865fbdf9.0
  da0cfd1d.0
  85cde254.0
  Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem
  cbb3f32b.0
  SecureSign_RootCA11.pem
  Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem
  5860aaa6.0
  31188b5e.0
  HiPKI_Root_CA_-_G1.pem
  c7f1359b.0
  5f15c80c.0
  Hongkong_Post_Root_CA_3.pem
  09789157.0
  ISRG_Root_X2.pem
  18856ac4.0
  1e09d511.0
  IdenTrust_Commercial_Root_CA_1.pem
  cf701eeb.0
  d06393bb.0
  IdenTrust_Public_Sector_Root_CA_1.pem
  10531352.0
  Izenpe.com.pem
  SecureTrust_CA.pem
  b0ed035a.0
  Microsec_e-Szigno_Root_CA_2009.pem
  8160b96c.0
  e8651083.0
  2c63f966.0
  Security_Communication_RootCA2.pem
  Microsoft_ECC_Root_Certificate_Authority_2017.pem
  8d89cda1.0
  01419da9.0
  SSL.com_TLS_RSA_Root_CA_2022.pem
  b7a5b843.0
  Microsoft_RSA_Root_Certificate_Authority_2017.pem
  bf53fb88.0
  9591a472.0
  3afde786.0
  SwissSign_Gold_CA_-_G2.pem
  NAVER_Global_Root_Certification_Authority.pem
  3fb36b73.0
  d39b0a2c.0
  a89d74c2.0
  cd58d51e.0
  b7db1890.0
  NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem
  988a38cb.0
  60afe812.0
  f39fc864.0
  5443e9e3.0
  OISTE_WISeKey_Global_Root_GB_CA.pem
  e73d606e.0
  dfc0fe80.0
  b66938e9.0
  1e1eab7c.0
  OISTE_WISeKey_Global_Root_GC_CA.pem
  773e07ad.0
  3c899c73.0
  d59297b8.0
  ddcda989.0
  QuoVadis_Root_CA_1_G3.pem
  749e9e03.0
  52b525c7.0
  Security_Communication_RootCA3.pem
  QuoVadis_Root_CA_2.pem
  d7e8dc79.0
  7a819ef2.0
  08063a00.0
  6b483515.0
  QuoVadis_Root_CA_2_G3.pem
  064e0aa9.0
  1f58a078.0
  6f7454b3.0
  7fa05551.0
  QuoVadis_Root_CA_3.pem
  76faf6c0.0
  9339512a.0
  f387163d.0
  ee37c333.0
  QuoVadis_Root_CA_3_G3.pem
  e18bfb83.0
  e442e424.0
  fe8a2cd8.0
  23f4c490.0
  5cd81ad7.0
  SSL.com_EV_Root_Certification_Authority_ECC.pem
  f0c70a8d.0
  7892ad52.0
  SZAFIR_ROOT_CA2.pem
  4f316efb.0
  SSL.com_EV_Root_Certification_Authority_RSA_R2.pem
  06dc52d5.0
  583d0756.0
  Sectigo_Public_Server_Authentication_Root_R46.pem
  SSL.com_Root_Certification_Authority_ECC.pem
  0bf05006.0
  88950faa.0
  9046744a.0
  3c860d51.0
  SSL.com_Root_Certification_Authority_RSA.pem
  6fa5da56.0
  33ee480d.0
  Secure_Global_CA.pem
  63a2c897.0
  SSL.com_TLS_ECC_Root_CA_2022.pem
  bdacca6f.0
  ff34af3f.0
  dbff3a01.0
  Security_Communication_ECC_RootCA1.pem
  emSign_Root_CA_-_C1.pem
  Starfield_Class_2_Certification_Authority.pem
  406c9bb1.0
  Starfield_Root_Certificate_Authority_-_G2.pem
  emSign_ECC_Root_CA_-_C3.pem
  Starfield_Services_Root_Certificate_Authority_-_G2.pem
  SwissSign_Silver_CA_-_G2.pem
  99e1b953.0
  T-TeleSec_GlobalRoot_Class_2.pem
  vTrus_Root_CA.pem
  T-TeleSec_GlobalRoot_Class_3.pem
  14bc7599.0
  TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem
  TWCA_Global_Root_CA.pem
  7a3adc42.0
  TWCA_Root_Certification_Authority.pem
  f459871d.0
  Telekom_Security_TLS_ECC_Root_2020.pem
  emSign_Root_CA_-_G1.pem
  Telekom_Security_TLS_RSA_Root_2023.pem
  TeliaSonera_Root_CA_v1.pem
  Telia_Root_CA_v2.pem
  8f103249.0
  f058632f.0
  ca-certificates.crt
  TrustAsia_Global_Root_CA_G3.pem
  9bf03295.0
Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 
10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:37:46 crc 
restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 10:37:46 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 10:37:46 crc restorecon[4673]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 
10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:37:47 crc restorecon[4673]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:37:47 crc restorecon[4673]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 22 10:37:48 crc kubenswrapper[4938]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 22 10:37:48 crc kubenswrapper[4938]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 22 10:37:48 crc kubenswrapper[4938]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 22 10:37:48 crc kubenswrapper[4938]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 22 10:37:48 crc kubenswrapper[4938]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 22 10:37:48 crc kubenswrapper[4938]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.039818 4938 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.045852 4938 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.045900 4938 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.045940 4938 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.045949 4938 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.045958 4938 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.045965 4938 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.045973 4938 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.045980 4938 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.045987 4938 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.045996 4938 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046005 4938 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046013 4938 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046020 4938 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046030 4938 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046037 4938 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046045 4938 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046052 4938 feature_gate.go:330] unrecognized feature gate: Example Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046058 4938 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046065 4938 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046098 4938 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046105 4938 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046112 4938 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046118 4938 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046125 4938 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046132 4938 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046138 4938 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046145 4938 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046152 4938 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046158 4938 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046166 4938 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046172 4938 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046179 4938 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046186 4938 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046192 4938 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046199 4938 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046206 4938 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046212 4938 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046221 4938 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046228 4938 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046236 4938 feature_gate.go:330] unrecognized feature gate: UpgradeStatus 
Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046243 4938 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046249 4938 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046256 4938 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046263 4938 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046270 4938 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046276 4938 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046283 4938 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046291 4938 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046298 4938 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046305 4938 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046312 4938 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046319 4938 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046327 4938 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046334 4938 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046344 4938 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046353 4938 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046360 4938 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046369 4938 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046376 4938 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046384 4938 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
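
[editor's note] The W-level noise here is OpenShift-specific gate names that the embedded Kubernetes feature_gate parser does not recognize; the kubelet emits the full list once per parse, so the same names recur in several passes below. A sketch for collapsing them into one sorted, de-duplicated list, again assuming a local kubelet.log copy:

    import re
    from pathlib import Path

    GATE = re.compile(r"unrecognized feature gate: (\w+)")

    text = Path("kubelet.log").read_text()
    gates = sorted(set(GATE.findall(text)))
    print(len(gates), "distinct unrecognized gates")
    for name in gates:
        print(" ", name)
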
Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046393 4938 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046400 4938 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046408 4938 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046415 4938 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046426 4938 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046434 4938 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046442 4938 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046449 4938 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046456 4938 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046463 4938 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.046470 4938 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046598 4938 flags.go:64] FLAG: --address="0.0.0.0" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046614 4938 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046630 4938 flags.go:64] FLAG: --anonymous-auth="true" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046641 4938 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046651 4938 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046660 4938 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046670 4938 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046679 4938 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046689 4938 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046698 4938 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046707 4938 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046715 4938 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046724 4938 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046732 4938 flags.go:64] FLAG: --cgroup-root="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046739 4938 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046748 4938 flags.go:64] FLAG: --client-ca-file="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046756 4938 flags.go:64] 
FLAG: --cloud-config="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046763 4938 flags.go:64] FLAG: --cloud-provider="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046771 4938 flags.go:64] FLAG: --cluster-dns="[]" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046780 4938 flags.go:64] FLAG: --cluster-domain="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046788 4938 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046796 4938 flags.go:64] FLAG: --config-dir="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046804 4938 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046812 4938 flags.go:64] FLAG: --container-log-max-files="5" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046822 4938 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046830 4938 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046838 4938 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046847 4938 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046854 4938 flags.go:64] FLAG: --contention-profiling="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046862 4938 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046870 4938 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046878 4938 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046888 4938 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046898 4938 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046906 4938 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046938 4938 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046947 4938 flags.go:64] FLAG: --enable-load-reader="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046955 4938 flags.go:64] FLAG: --enable-server="true" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046964 4938 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046976 4938 flags.go:64] FLAG: --event-burst="100" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046984 4938 flags.go:64] FLAG: --event-qps="50" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.046992 4938 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047000 4938 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047008 4938 flags.go:64] FLAG: --eviction-hard="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047017 4938 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047025 4938 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047033 4938 flags.go:64] FLAG: 
--eviction-pressure-transition-period="5m0s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047041 4938 flags.go:64] FLAG: --eviction-soft="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047049 4938 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047057 4938 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047065 4938 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047073 4938 flags.go:64] FLAG: --experimental-mounter-path="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047080 4938 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047088 4938 flags.go:64] FLAG: --fail-swap-on="true" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047096 4938 flags.go:64] FLAG: --feature-gates="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047106 4938 flags.go:64] FLAG: --file-check-frequency="20s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047114 4938 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047122 4938 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047130 4938 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047138 4938 flags.go:64] FLAG: --healthz-port="10248" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047146 4938 flags.go:64] FLAG: --help="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047154 4938 flags.go:64] FLAG: --hostname-override="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047162 4938 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047170 4938 flags.go:64] FLAG: --http-check-frequency="20s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047178 4938 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047186 4938 flags.go:64] FLAG: --image-credential-provider-config="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047194 4938 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047201 4938 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047209 4938 flags.go:64] FLAG: --image-service-endpoint="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047217 4938 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047225 4938 flags.go:64] FLAG: --kube-api-burst="100" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047233 4938 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047242 4938 flags.go:64] FLAG: --kube-api-qps="50" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047250 4938 flags.go:64] FLAG: --kube-reserved="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047267 4938 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047275 4938 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047283 4938 flags.go:64] FLAG: 
--kubelet-cgroups="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047291 4938 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047299 4938 flags.go:64] FLAG: --lock-file="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047306 4938 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047314 4938 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047322 4938 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047335 4938 flags.go:64] FLAG: --log-json-split-stream="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047343 4938 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047350 4938 flags.go:64] FLAG: --log-text-split-stream="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047358 4938 flags.go:64] FLAG: --logging-format="text" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047365 4938 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047374 4938 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047382 4938 flags.go:64] FLAG: --manifest-url="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047390 4938 flags.go:64] FLAG: --manifest-url-header="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047400 4938 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047408 4938 flags.go:64] FLAG: --max-open-files="1000000" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047417 4938 flags.go:64] FLAG: --max-pods="110" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047425 4938 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047433 4938 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047441 4938 flags.go:64] FLAG: --memory-manager-policy="None" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047449 4938 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047457 4938 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047467 4938 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047475 4938 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047492 4938 flags.go:64] FLAG: --node-status-max-images="50" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047500 4938 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047508 4938 flags.go:64] FLAG: --oom-score-adj="-999" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047516 4938 flags.go:64] FLAG: --pod-cidr="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047524 4938 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 22 10:37:48 crc 
kubenswrapper[4938]: I1122 10:37:48.047537 4938 flags.go:64] FLAG: --pod-manifest-path="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047545 4938 flags.go:64] FLAG: --pod-max-pids="-1" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047553 4938 flags.go:64] FLAG: --pods-per-core="0" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047561 4938 flags.go:64] FLAG: --port="10250" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047570 4938 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047579 4938 flags.go:64] FLAG: --provider-id="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047586 4938 flags.go:64] FLAG: --qos-reserved="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047594 4938 flags.go:64] FLAG: --read-only-port="10255" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047602 4938 flags.go:64] FLAG: --register-node="true" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047610 4938 flags.go:64] FLAG: --register-schedulable="true" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047617 4938 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047631 4938 flags.go:64] FLAG: --registry-burst="10" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047638 4938 flags.go:64] FLAG: --registry-qps="5" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047646 4938 flags.go:64] FLAG: --reserved-cpus="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047654 4938 flags.go:64] FLAG: --reserved-memory="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047665 4938 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047672 4938 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047680 4938 flags.go:64] FLAG: --rotate-certificates="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047687 4938 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047695 4938 flags.go:64] FLAG: --runonce="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047703 4938 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047711 4938 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047719 4938 flags.go:64] FLAG: --seccomp-default="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047727 4938 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047735 4938 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047743 4938 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047751 4938 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047758 4938 flags.go:64] FLAG: --storage-driver-password="root" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047765 4938 flags.go:64] FLAG: --storage-driver-secure="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047773 4938 flags.go:64] FLAG: --storage-driver-table="stats" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047781 4938 flags.go:64] FLAG: 
--storage-driver-user="root" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047789 4938 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047797 4938 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047805 4938 flags.go:64] FLAG: --system-cgroups="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047813 4938 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047826 4938 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047833 4938 flags.go:64] FLAG: --tls-cert-file="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047841 4938 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047850 4938 flags.go:64] FLAG: --tls-min-version="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047858 4938 flags.go:64] FLAG: --tls-private-key-file="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047865 4938 flags.go:64] FLAG: --topology-manager-policy="none" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047874 4938 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047882 4938 flags.go:64] FLAG: --topology-manager-scope="container" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047890 4938 flags.go:64] FLAG: --v="2" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047900 4938 flags.go:64] FLAG: --version="false" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047934 4938 flags.go:64] FLAG: --vmodule="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047951 4938 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.047960 4938 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048143 4938 feature_gate.go:330] unrecognized feature gate: Example Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048153 4938 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048160 4938 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048167 4938 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048174 4938 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048182 4938 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048189 4938 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048195 4938 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048205 4938 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
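
[editor's note] The flags.go:64 lines above record every kubelet flag with its effective value, one FLAG: --name="value" entry per flag. For diffing two runs it is convenient to load them into a dict; a minimal sketch under the same local-copy assumption:

    import re
    from pathlib import Path

    # flags.go:64 entries render as: FLAG: --max-pods="110"
    FLAG = re.compile(r'FLAG: (--[\w-]+)="([^"]*)"')

    flags = dict(FLAG.findall(Path("kubelet.log").read_text()))
    print(flags["--node-ip"])                     # 192.168.126.11
    print(flags["--container-runtime-endpoint"])  # /var/run/crio/crio.sock
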
Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048213 4938 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048220 4938 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048227 4938 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048234 4938 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048243 4938 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048251 4938 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048259 4938 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048267 4938 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048274 4938 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048281 4938 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048288 4938 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048296 4938 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048303 4938 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048310 4938 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048316 4938 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048324 4938 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048330 4938 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048337 4938 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048343 4938 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048350 4938 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048369 4938 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048376 4938 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048382 4938 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048390 4938 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048398 4938 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048407 4938 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048415 4938 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048422 4938 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048430 4938 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048436 4938 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048444 4938 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048450 4938 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048459 4938 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048468 4938 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048476 4938 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048483 4938 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048491 4938 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048498 4938 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048505 4938 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048514 4938 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048522 4938 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048529 4938 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048536 4938 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048542 4938 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048550 4938 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048557 4938 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048565 4938 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048573 4938 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048580 4938 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048587 4938 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 
10:37:48.048594 4938 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048601 4938 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048611 4938 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048618 4938 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048625 4938 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048631 4938 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048639 4938 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048647 4938 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048653 4938 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048660 4938 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048666 4938 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.048673 4938 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.048694 4938 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.059236 4938 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.059265 4938 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059318 4938 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059325 4938 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059329 4938 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059333 4938 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059337 4938 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059340 4938 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059344 4938 feature_gate.go:330] unrecognized feature gate: Example Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059348 4938 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 
10:37:48.059352 4938 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059356 4938 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059359 4938 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059362 4938 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059366 4938 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059369 4938 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059373 4938 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059376 4938 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059380 4938 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059383 4938 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059386 4938 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059390 4938 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059394 4938 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059398 4938 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059401 4938 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059405 4938 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059409 4938 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059412 4938 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059416 4938 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059419 4938 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059423 4938 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059426 4938 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059430 4938 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059433 4938 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059437 4938 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059440 4938 feature_gate.go:330] unrecognized feature gate: 
AWSEFSDriverVolumeMetrics Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059444 4938 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059449 4938 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059454 4938 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059457 4938 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059461 4938 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059466 4938 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059470 4938 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059474 4938 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059478 4938 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059482 4938 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059487 4938 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059491 4938 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059495 4938 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059499 4938 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059502 4938 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059506 4938 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059510 4938 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059513 4938 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059517 4938 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059522 4938 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059526 4938 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059530 4938 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059534 4938 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059538 4938 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059542 4938 feature_gate.go:330] unrecognized feature gate: 
MultiArchInstallAzure Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059546 4938 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059549 4938 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059554 4938 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059558 4938 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059561 4938 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059565 4938 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059568 4938 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059572 4938 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059575 4938 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059578 4938 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059582 4938 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.059585 4938 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.059591 4938 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060006 4938 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060017 4938 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060021 4938 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060031 4938 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
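
[editor's note] The I-level "feature gates: {map[...]}" entries are the resolved gate set after all overrides, printed with Go's default map formatting. A small sketch for turning that rendering into a Python dict of booleans (the line below is a shortened copy of the logged one; the real entry carries the full map):

    import re

    line = ("feature gates: {map[CloudDualStackNodeIPs:true "
            "DisableKubeletCloudCredentialProviders:true KMSv1:true "
            "ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}")

    inner = re.search(r"map\[(.*)\]", line).group(1)
    gates = {k: v == "true" for k, v in (pair.split(":") for pair in inner.split())}
    print(gates["KMSv1"])                      # True
    print(sum(gates.values()), "gates enabled in this subset")
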
Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060036 4938 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060041 4938 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060044 4938 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060048 4938 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060052 4938 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060056 4938 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060059 4938 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060063 4938 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060066 4938 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060071 4938 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060075 4938 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060078 4938 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060087 4938 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060092 4938 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060097 4938 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.060101 4938 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061145 4938 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061154 4938 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061158 4938 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061162 4938 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061167 4938 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061171 4938 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061175 4938 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061178 4938 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061187 4938 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061191 4938 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061194 4938 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061198 4938 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061202 4938 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061206 4938 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061211 4938 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061215 4938 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061219 4938 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061224 4938 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061228 4938 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061232 4938 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061237 4938 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061248 4938 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061252 4938 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061256 4938 
feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061262 4938 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061269 4938 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061274 4938 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061279 4938 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061282 4938 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061286 4938 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061291 4938 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061294 4938 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061298 4938 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061305 4938 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061308 4938 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061312 4938 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061317 4938 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061321 4938 feature_gate.go:330] unrecognized feature gate: Example Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061324 4938 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061328 4938 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061331 4938 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061335 4938 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061339 4938 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061345 4938 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061349 4938 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061353 4938 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061359 4938 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061363 4938 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061367 4938 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061371 4938 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.061374 4938 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.061382 4938 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.062607 4938 server.go:940] "Client rotation is on, will bootstrap in background" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.066961 4938 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.067044 4938 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
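
[editor's note] Client rotation (enabled below) schedules renewal at a jittered point late in the certificate's validity window, roughly the final third in upstream kubelet, which is why the "Waiting 491h..." entry that follows sleeps for weeks. To inspect the on-disk pair the kubelet just loaded, a hedged sketch using the third-party cryptography package (an assumed dependency; any PEM-capable library works):

    from cryptography import x509  # third-party package; an assumption, not part of the node image

    # Path taken verbatim from the certificate_store.go entry above.
    PEM = "/var/lib/kubelet/pki/kubelet-client-current.pem"

    with open(PEM, "rb") as f:
        cert = x509.load_pem_x509_certificate(f.read())

    print("subject:", cert.subject.rfc4514_string())
    print("not valid after:", cert.not_valid_after)  # compare with the expiration logged below
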
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.068662 4938 server.go:997] "Starting client certificate rotation" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.068692 4938 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.068949 4938 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-12 22:29:52.552598013 +0000 UTC Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.069043 4938 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 491h52m4.483560031s for next certificate rotation Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.105948 4938 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.115161 4938 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.142542 4938 log.go:25] "Validated CRI v1 runtime API" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.236259 4938 log.go:25] "Validated CRI v1 image API" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.246950 4938 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.258678 4938 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-22-10-33-10-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.258707 4938 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.272094 4938 manager.go:217] Machine: {Timestamp:2025-11-22 10:37:48.269789395 +0000 UTC m=+0.737626814 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654120448 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:3dc249c0-2c41-46c5-a9ec-f9214ae2ea91 BootID:cb39cae8-7171-4fb1-ac14-5b3907852f98 Filesystems:[{Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827060224 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 
Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:36:ca:ea Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:36:ca:ea Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:38:61:02 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:71:af:18 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:94:6f:fc Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:da:98:15 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:02:ed:cf:9f:25:ae Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:7e:05:46:32:99:28 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654120448 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: 
DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.272323 4938 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.272443 4938 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.274056 4938 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.274208 4938 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.274243 4938 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.276171 4938 topology_manager.go:138] "Creating topology manager with none policy" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.276197 4938 container_manager_linux.go:303] "Creating device plugin manager" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.284182 4938 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.284216 4938 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 22 
10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.284423 4938 state_mem.go:36] "Initialized new in-memory state store" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.284514 4938 server.go:1245] "Using root directory" path="/var/lib/kubelet" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.300717 4938 kubelet.go:418] "Attempting to sync node with API server" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.300768 4938 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.300790 4938 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.300810 4938 kubelet.go:324] "Adding apiserver pod source" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.300825 4938 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.329499 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.329657 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.182:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.329582 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.329784 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.182:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.331574 4938 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.337086 4938 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". 
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.338713 4938 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.349498 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.349527 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.349534 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.349541 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.349553 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.349561 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.349568 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.349578 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.349587 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.349594 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.349605 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.349612 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.352218 4938 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.352777 4938 server.go:1280] "Started kubelet" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.353287 4938 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:48 crc systemd[1]: Started Kubernetes Kubelet. 
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.355934 4938 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.356042 4938 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.356741 4938 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.361237 4938 server.go:460] "Adding debug handlers to kubelet server" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.368904 4938 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.368973 4938 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.369281 4938 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 12:03:32.67859795 +0000 UTC Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.369362 4938 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 937h25m44.309240549s for next certificate rotation Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.369440 4938 volume_manager.go:287] "The desired_state_of_world populator starts" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.369471 4938 volume_manager.go:289] "Starting Kubelet Volume Manager" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.369596 4938 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.369905 4938 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.370159 4938 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.182:6443: connect: connection refused" interval="200ms" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.370677 4938 factory.go:55] Registering systemd factory Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.370662 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.370705 4938 factory.go:221] Registration of the systemd container factory successfully Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.370722 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.182:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.370995 4938 factory.go:153] Registering CRI-O factory Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.371019 4938 factory.go:221] Registration of the crio container factory successfully Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.371087 4938 factory.go:219] 
Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.371111 4938 factory.go:103] Registering Raw factory Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.371127 4938 manager.go:1196] Started watching for new ooms in manager Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.372024 4938 manager.go:319] Starting recovery of all containers Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403268 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403307 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403322 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403340 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403350 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403387 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403399 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403410 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403421 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403431 4938 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403440 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403450 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403459 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403470 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403480 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403488 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403499 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403508 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.403517 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404290 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404314 4938 reconstruct.go:130] "Volume is marked as uncertain 
and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404328 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404337 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404347 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404356 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404366 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404393 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404404 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404414 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404425 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404435 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404445 4938 reconstruct.go:130] "Volume is marked as uncertain and added into 
the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404455 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404464 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404474 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404484 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404496 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404506 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404516 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404526 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404535 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404545 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404560 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404571 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404582 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404594 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404604 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404614 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404624 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404634 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404644 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404654 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404668 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404679 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404690 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404700 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404712 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404722 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404731 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404743 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404753 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404762 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404771 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404781 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.404790 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405051 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405066 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405075 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405084 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405094 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405105 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405116 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405125 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405135 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405144 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405155 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" 
volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405164 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405174 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405186 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405195 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405229 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405239 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405248 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405258 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405269 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405323 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405335 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405345 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405355 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405366 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405377 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405388 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405401 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405413 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405427 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405437 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405447 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405456 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" 
volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405464 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405474 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405484 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405494 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405504 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405514 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405534 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405544 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405555 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405566 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405576 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" 
volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405586 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405597 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405607 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405617 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405625 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405635 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405646 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405656 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405668 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405678 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405687 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405697 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405707 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405717 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405727 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405737 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405748 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405793 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405803 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405813 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405823 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405832 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405841 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405852 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405862 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405872 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405881 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405892 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405902 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405926 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405937 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405947 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405957 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" 
volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405972 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405983 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.405992 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406001 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406010 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406019 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406028 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406038 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406048 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406057 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406066 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406077 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406086 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406096 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406107 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.406116 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.402446 4938 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.182:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187a4de945344fa9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-22 10:37:48.352745385 +0000 UTC m=+0.820582774,LastTimestamp:2025-11-22 10:37:48.352745385 +0000 UTC m=+0.820582774,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413353 4938 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413382 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413398 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" 
volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413411 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413422 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413435 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413448 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413459 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413469 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413480 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413520 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413531 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413543 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413556 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413566 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413576 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413588 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413598 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413609 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413619 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413629 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413643 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413657 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413670 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413703 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413717 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413730 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413740 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413748 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413759 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413769 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413781 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413792 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413802 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413812 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413822 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413832 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413843 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413853 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413862 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413872 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413882 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413894 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413903 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413929 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413939 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413948 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413959 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413977 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.413989 4938 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.414001 4938 reconstruct.go:97] "Volume reconstruction finished" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.414009 4938 reconciler.go:26] "Reconciler: start to sync state" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.423706 4938 manager.go:324] Recovery completed Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.435243 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.437024 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.437091 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.437106 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.438505 4938 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.438529 4938 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.438608 4938 state_mem.go:36] "Initialized new in-memory state store" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.444552 4938 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.446099 4938 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.446150 4938 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.446180 4938 kubelet.go:2335] "Starting kubelet main sync loop" Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.446355 4938 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.450861 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.451023 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.182:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.471267 4938 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.538409 4938 policy_none.go:49] "None policy: Start" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.539767 4938 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.539895 4938 state_mem.go:35] "Initializing new in-memory state store" Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.547094 4938 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.571035 4938 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.182:6443: connect: connection refused" interval="400ms" Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.572468 4938 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.600150 4938 manager.go:334] "Starting Device Plugin manager" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.600215 4938 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.600227 4938 server.go:79] "Starting device plugin registration server" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.600614 4938 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.600629 4938 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.600829 4938 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.600896 4938 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 
Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.606417 4938 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.701395 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.703360 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.703416 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.703424 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.703454 4938 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.704062 4938 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.182:6443: connect: connection refused" node="crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.747734 4938 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc"]
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.748183 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.749995 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.750085 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.750106 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.750461 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.751006 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.751242 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.753017 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.753084 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.753105 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.756196 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.756267 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.756285 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.756492 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.756876 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.757002 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.759323 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.759357 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.759374 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.759542 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.759560 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.759591 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.759612 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.760250 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.760349 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.762440 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.762486 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.762497 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.762672 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.762904 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.763039 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.764319 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.764355 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.764373 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.764479 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.764514 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.764535 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.765125 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.765170 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.765188 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.765380 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.765509 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.767141 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.767174 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.767185 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818192 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818282 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818329 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818470 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818508 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818572 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818606 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818671 4938 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818740 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818773 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818836 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818868 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.818957 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.819025 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.819054 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: W1122 10:37:48.819888 4938 helpers.go:245] readString: Failed to read "/sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/cpuset.cpus.effective": read /sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/cpuset.cpus.effective: no such device Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.904853 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume 
controller attach/detach" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.906155 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.906238 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.906259 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.906328 4938 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.906975 4938 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.182:6443: connect: connection refused" node="crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920157 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920203 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920230 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920250 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920276 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920298 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920318 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:37:48 crc 
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920338 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920362 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920380 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920400 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920438 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920490 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920534 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920543 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920544 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920516 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920447 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920592 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920490 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920624 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920636 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920644 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920659 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920596 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920695 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920714 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920724 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920788 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: I1122 10:37:48.920811 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 10:37:48 crc kubenswrapper[4938]: E1122 10:37:48.972195 4938 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.182:6443: connect: connection refused" interval="800ms"
Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.092837 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.108892 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.131286 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 22 10:37:49 crc kubenswrapper[4938]: W1122 10:37:49.148677 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-461796cfaf741dae72f981aa211efcc73ab9d3e3d87b75b56b2fef310e22aa88 WatchSource:0}: Error finding container 461796cfaf741dae72f981aa211efcc73ab9d3e3d87b75b56b2fef310e22aa88: Status 404 returned error can't find the container with id 461796cfaf741dae72f981aa211efcc73ab9d3e3d87b75b56b2fef310e22aa88
Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.150103 4938 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:37:49 crc kubenswrapper[4938]: W1122 10:37:49.152064 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-765754598e18999810e8bbed4c3b01b6f8acb053b8dc2e1fa41e8c0873e586b3 WatchSource:0}: Error finding container 765754598e18999810e8bbed4c3b01b6f8acb053b8dc2e1fa41e8c0873e586b3: Status 404 returned error can't find the container with id 765754598e18999810e8bbed4c3b01b6f8acb053b8dc2e1fa41e8c0873e586b3 Nov 22 10:37:49 crc kubenswrapper[4938]: W1122 10:37:49.155172 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-283fb2993a57f56b8a0668b6cc14a8b8b21eca62f0967b3e398caf179ed303eb WatchSource:0}: Error finding container 283fb2993a57f56b8a0668b6cc14a8b8b21eca62f0967b3e398caf179ed303eb: Status 404 returned error can't find the container with id 283fb2993a57f56b8a0668b6cc14a8b8b21eca62f0967b3e398caf179ed303eb Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.157773 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:37:49 crc kubenswrapper[4938]: W1122 10:37:49.170242 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-8f5375f7d5ca29902c67d60ddd8769610b092cccea8d06a24ab0b42518fa5a8e WatchSource:0}: Error finding container 8f5375f7d5ca29902c67d60ddd8769610b092cccea8d06a24ab0b42518fa5a8e: Status 404 returned error can't find the container with id 8f5375f7d5ca29902c67d60ddd8769610b092cccea8d06a24ab0b42518fa5a8e Nov 22 10:37:49 crc kubenswrapper[4938]: W1122 10:37:49.179784 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-48656c33b851bcad3ec512af133d5d8ef97b7252664e35517c7a23a83acf1a85 WatchSource:0}: Error finding container 48656c33b851bcad3ec512af133d5d8ef97b7252664e35517c7a23a83acf1a85: Status 404 returned error can't find the container with id 48656c33b851bcad3ec512af133d5d8ef97b7252664e35517c7a23a83acf1a85 Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.307998 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.309130 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.309171 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.309184 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.309216 4938 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 10:37:49 crc kubenswrapper[4938]: E1122 10:37:49.309679 4938 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.182:6443: connect: connection refused" node="crc" Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 
10:37:49.354875 4938 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.450000 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"48656c33b851bcad3ec512af133d5d8ef97b7252664e35517c7a23a83acf1a85"} Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.450748 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8f5375f7d5ca29902c67d60ddd8769610b092cccea8d06a24ab0b42518fa5a8e"} Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.451679 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"283fb2993a57f56b8a0668b6cc14a8b8b21eca62f0967b3e398caf179ed303eb"} Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.452445 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"461796cfaf741dae72f981aa211efcc73ab9d3e3d87b75b56b2fef310e22aa88"} Nov 22 10:37:49 crc kubenswrapper[4938]: I1122 10:37:49.453342 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"765754598e18999810e8bbed4c3b01b6f8acb053b8dc2e1fa41e8c0873e586b3"} Nov 22 10:37:49 crc kubenswrapper[4938]: W1122 10:37:49.603168 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:49 crc kubenswrapper[4938]: E1122 10:37:49.603275 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.182:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:37:49 crc kubenswrapper[4938]: E1122 10:37:49.773671 4938 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.182:6443: connect: connection refused" interval="1.6s" Nov 22 10:37:49 crc kubenswrapper[4938]: W1122 10:37:49.830462 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:49 crc kubenswrapper[4938]: E1122 10:37:49.830542 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": 
dial tcp 38.102.83.182:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:37:49 crc kubenswrapper[4938]: W1122 10:37:49.851405 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:49 crc kubenswrapper[4938]: E1122 10:37:49.851481 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.182:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:37:49 crc kubenswrapper[4938]: W1122 10:37:49.858289 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:49 crc kubenswrapper[4938]: E1122 10:37:49.858327 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.182:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.110094 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.111597 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.111697 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.111714 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.111744 4938 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 10:37:50 crc kubenswrapper[4938]: E1122 10:37:50.112422 4938 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.182:6443: connect: connection refused" node="crc" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.354398 4938 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.458963 4938 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64" exitCode=0 Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.459058 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64"} Nov 22 10:37:50 crc 
kubenswrapper[4938]: I1122 10:37:50.459110 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.460171 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.460200 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.460215 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.461193 4938 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd" exitCode=0 Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.461257 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.461268 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd"} Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.462236 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.462281 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.462295 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.465140 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502"} Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.465170 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47"} Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.465183 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116"} Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.466575 4938 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a" exitCode=0 Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.466637 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a"} Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.466699 4938 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.467655 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.467691 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.467703 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.468301 4938 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="0248a3ffa73fa20db2fc5ddf0324410eacc266c322e55227dbcf8341c42cc559" exitCode=0 Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.468328 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"0248a3ffa73fa20db2fc5ddf0324410eacc266c322e55227dbcf8341c42cc559"} Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.468465 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.468968 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.469481 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.469540 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.469556 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.470136 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.470164 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:50 crc kubenswrapper[4938]: I1122 10:37:50.470178 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.354579 4938 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:51 crc kubenswrapper[4938]: E1122 10:37:51.374978 4938 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.182:6443: connect: connection refused" interval="3.2s" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.475861 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac"} Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.475909 4938 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5"} Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.478457 4938 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b6a44c1742b72de50ae3e79ad9bea06bd2c7b943229916b9b8df8d8115dff0f7" exitCode=0 Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.478601 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.478558 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b6a44c1742b72de50ae3e79ad9bea06bd2c7b943229916b9b8df8d8115dff0f7"} Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.480006 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.480043 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.480054 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.480977 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"2666bf2a66ad2ce74076f80bb6c6f2f84bd0f8bdc182383bec5939a141c238b9"} Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.481041 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.482102 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.482155 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.482194 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.483881 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630"} Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.483951 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452"} Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.486857 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea"} Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.486939 4938 kubelet_node_status.go:401] 
"Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.487826 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.487856 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.487867 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.713007 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.714585 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.714633 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.714644 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:51 crc kubenswrapper[4938]: I1122 10:37:51.714688 4938 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 10:37:51 crc kubenswrapper[4938]: E1122 10:37:51.715405 4938 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.182:6443: connect: connection refused" node="crc" Nov 22 10:37:52 crc kubenswrapper[4938]: W1122 10:37:52.169345 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:52 crc kubenswrapper[4938]: E1122 10:37:52.169430 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.182:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:37:52 crc kubenswrapper[4938]: W1122 10:37:52.271936 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:52 crc kubenswrapper[4938]: E1122 10:37:52.272008 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.182:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.354036 4938 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:52 crc kubenswrapper[4938]: W1122 10:37:52.381684 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed 
to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:52 crc kubenswrapper[4938]: E1122 10:37:52.381773 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.182:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.502005 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7d7532a6c9b6ea41faa073cca8ec1f0bec26f02dbde758bf19313728a4fcba66"} Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.502067 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce"} Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.502077 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e"} Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.502191 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.502988 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.503041 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.503050 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.506043 4938 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="707a8f855bd1dfa346be2997b137323cc07ec7bb2565c5b61de2b2e2197948a5" exitCode=0 Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.506105 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"707a8f855bd1dfa346be2997b137323cc07ec7bb2565c5b61de2b2e2197948a5"} Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.506213 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.507636 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.507658 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.507666 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.509603 4938 kubelet_node_status.go:401] "Setting 
node annotation to enable volume controller attach/detach" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.509642 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9"} Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.509701 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.512455 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.512623 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.512656 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.512668 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.513309 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.513363 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.513402 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.513404 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.513424 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.513435 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.709266 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:37:52 crc kubenswrapper[4938]: W1122 10:37:52.742953 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:52 crc kubenswrapper[4938]: E1122 10:37:52.743050 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.182:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:37:52 crc kubenswrapper[4938]: I1122 10:37:52.837865 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.354392 4938 csi_plugin.go:884] Failed to contact API server when waiting for 
CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.514645 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.514657 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"8d0c1e1dce4c3b36595c21c681c2bf72797b4c64154e28eb9c5764c01e3e2b25"} Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.514696 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"08cb862961be7704390c7914e6b1b07ec5acfc0a8684b219628b3374e0503cf5"} Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.514709 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1bea9ce72194c381c26a26ad8785fcbf3814d9ef4550e0a10fa9038e22fa1ed0"} Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.514745 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.514803 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.515240 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.515268 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.515753 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.515771 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.515794 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.515814 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.515778 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.515849 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.515798 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.515930 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:53 crc kubenswrapper[4938]: I1122 10:37:53.515939 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:53 crc kubenswrapper[4938]: E1122 10:37:53.642994 4938 
event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.182:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187a4de945344fa9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-22 10:37:48.352745385 +0000 UTC m=+0.820582774,LastTimestamp:2025-11-22 10:37:48.352745385 +0000 UTC m=+0.820582774,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.354670 4938 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.520413 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.522179 4938 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7d7532a6c9b6ea41faa073cca8ec1f0bec26f02dbde758bf19313728a4fcba66" exitCode=255 Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.522296 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"7d7532a6c9b6ea41faa073cca8ec1f0bec26f02dbde758bf19313728a4fcba66"} Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.522366 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.523338 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.523374 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.523387 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.524059 4938 scope.go:117] "RemoveContainer" containerID="7d7532a6c9b6ea41faa073cca8ec1f0bec26f02dbde758bf19313728a4fcba66" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.528978 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.528972 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a3d43c7f41bef25e5f9e54cc4bad63528c65b3d8fbc973019dd23cd8ca518af4"} Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.529052 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.529043 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a5b66c1cb8d341db26bfd1f5b341172bb1f349df3f2876644ab78c31c9b4ce36"} Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.529107 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.529866 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.529900 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.529929 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.530096 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.530122 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.530133 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.530997 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.531056 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.531080 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:54 crc kubenswrapper[4938]: E1122 10:37:54.576707 4938 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.182:6443: connect: connection refused" interval="6.4s" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.916461 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.917707 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.917737 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.917746 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:54 crc kubenswrapper[4938]: I1122 10:37:54.917770 4938 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 10:37:54 crc kubenswrapper[4938]: E1122 10:37:54.918395 4938 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.182:6443: connect: connection refused" node="crc" Nov 22 10:37:55 crc kubenswrapper[4938]: I1122 10:37:55.247993 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:37:55 crc kubenswrapper[4938]: I1122 10:37:55.354551 4938 csi_plugin.go:884] Failed 
to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.182:6443: connect: connection refused Nov 22 10:37:55 crc kubenswrapper[4938]: I1122 10:37:55.533380 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 22 10:37:55 crc kubenswrapper[4938]: I1122 10:37:55.534825 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683"} Nov 22 10:37:55 crc kubenswrapper[4938]: I1122 10:37:55.534867 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:55 crc kubenswrapper[4938]: I1122 10:37:55.534874 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:55 crc kubenswrapper[4938]: I1122 10:37:55.535719 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:55 crc kubenswrapper[4938]: I1122 10:37:55.535728 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:55 crc kubenswrapper[4938]: I1122 10:37:55.535742 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:55 crc kubenswrapper[4938]: I1122 10:37:55.535744 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:55 crc kubenswrapper[4938]: I1122 10:37:55.535751 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:55 crc kubenswrapper[4938]: I1122 10:37:55.535755 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:56 crc kubenswrapper[4938]: I1122 10:37:56.537127 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:56 crc kubenswrapper[4938]: I1122 10:37:56.537205 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:37:56 crc kubenswrapper[4938]: I1122 10:37:56.537884 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:37:56 crc kubenswrapper[4938]: I1122 10:37:56.537942 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:37:56 crc kubenswrapper[4938]: I1122 10:37:56.537953 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:37:56 crc kubenswrapper[4938]: I1122 10:37:56.546508 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:37:57 crc kubenswrapper[4938]: I1122 10:37:57.066057 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 22 10:37:57 crc kubenswrapper[4938]: I1122 10:37:57.066255 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:37:57 crc kubenswrapper[4938]: I1122 
10:37:57.067749 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:57 crc kubenswrapper[4938]: I1122 10:37:57.067799 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:57 crc kubenswrapper[4938]: I1122 10:37:57.067808 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:57 crc kubenswrapper[4938]: I1122 10:37:57.541522 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:57 crc kubenswrapper[4938]: I1122 10:37:57.543209 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:57 crc kubenswrapper[4938]: I1122 10:37:57.543243 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:57 crc kubenswrapper[4938]: I1122 10:37:57.543253 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.077839 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.078046 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.079371 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.079412 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.079423 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.144455 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.239608 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.543399 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.543436 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.544433 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.544463 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.544474 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.545096 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.545134 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.545148 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:58 crc kubenswrapper[4938]: E1122 10:37:58.606532 4938 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.798428 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.798638 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.799974 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.800010 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:58 crc kubenswrapper[4938]: I1122 10:37:58.800026 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:59 crc kubenswrapper[4938]: I1122 10:37:59.545965 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:37:59 crc kubenswrapper[4938]: I1122 10:37:59.547201 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:37:59 crc kubenswrapper[4938]: I1122 10:37:59.547267 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:37:59 crc kubenswrapper[4938]: I1122 10:37:59.547291 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:37:59 crc kubenswrapper[4938]: I1122 10:37:59.552701 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 10:38:00 crc kubenswrapper[4938]: I1122 10:38:00.547968 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:38:00 crc kubenswrapper[4938]: I1122 10:38:00.549436 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:00 crc kubenswrapper[4938]: I1122 10:38:00.549463 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:00 crc kubenswrapper[4938]: I1122 10:38:00.549473 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:01 crc kubenswrapper[4938]: I1122 10:38:01.078813 4938 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 22 10:38:01 crc kubenswrapper[4938]: I1122 10:38:01.078888 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 22 10:38:01 crc kubenswrapper[4938]: I1122 10:38:01.318895 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:38:01 crc kubenswrapper[4938]: I1122 10:38:01.320322 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:01 crc kubenswrapper[4938]: I1122 10:38:01.320364 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:01 crc kubenswrapper[4938]: I1122 10:38:01.320377 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:01 crc kubenswrapper[4938]: I1122 10:38:01.320409 4938 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 22 10:38:05 crc kubenswrapper[4938]: W1122 10:38:05.970569 4938 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 22 10:38:05 crc kubenswrapper[4938]: I1122 10:38:05.970689 4938 trace.go:236] Trace[366551301]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 10:37:55.968) (total time: 10001ms):
Nov 22 10:38:05 crc kubenswrapper[4938]: Trace[366551301]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (10:38:05.970)
Nov 22 10:38:05 crc kubenswrapper[4938]: Trace[366551301]: [10.001691919s] [10.001691919s] END
Nov 22 10:38:05 crc kubenswrapper[4938]: E1122 10:38:05.970719 4938 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 22 10:38:06 crc kubenswrapper[4938]: I1122 10:38:06.053296 4938 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 22 10:38:06 crc kubenswrapper[4938]: I1122 10:38:06.053383 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 22 10:38:06 crc kubenswrapper[4938]: I1122 10:38:06.060899 4938 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 22 10:38:06 crc kubenswrapper[4938]: I1122 10:38:06.060958 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 22 10:38:06 crc kubenswrapper[4938]: I1122 10:38:06.553148 4938 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]log ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]etcd ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/openshift.io-api-request-count-filter ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/openshift.io-startkubeinformers ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/priority-and-fairness-config-consumer ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/priority-and-fairness-filter ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/start-apiextensions-informers ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/start-apiextensions-controllers ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/crd-informer-synced ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/start-system-namespaces-controller ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/start-cluster-authentication-info-controller ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/start-legacy-token-tracking-controller ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/start-service-ip-repair-controllers ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Nov 22 10:38:06 crc kubenswrapper[4938]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/priority-and-fairness-config-producer ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/bootstrap-controller ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/start-kube-aggregator-informers ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/apiservice-status-local-available-controller ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/apiservice-status-remote-available-controller ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/apiservice-registration-controller ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/apiservice-wait-for-first-sync ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/apiservice-discovery-controller ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/kube-apiserver-autoregistration ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]autoregister-completion ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/apiservice-openapi-controller ok
Nov 22 10:38:06 crc kubenswrapper[4938]: [+]poststarthook/apiservice-openapiv3-controller ok
Nov 22 10:38:06 crc kubenswrapper[4938]: livez check failed
Nov 22 10:38:06 crc kubenswrapper[4938]: I1122 10:38:06.555578 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 10:38:08 crc kubenswrapper[4938]: E1122 10:38:08.606641 4938 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 22 10:38:08 crc kubenswrapper[4938]: I1122 10:38:08.835000 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 22 10:38:08 crc kubenswrapper[4938]: I1122 10:38:08.835246 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:38:08 crc kubenswrapper[4938]: I1122 10:38:08.836740 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:08 crc kubenswrapper[4938]: I1122 10:38:08.836784 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:08 crc kubenswrapper[4938]: I1122 10:38:08.836804 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:08 crc kubenswrapper[4938]: I1122 10:38:08.856202 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 22 10:38:09 crc kubenswrapper[4938]: I1122 10:38:09.566888 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 10:38:09 crc kubenswrapper[4938]: I1122 10:38:09.567975 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:09 crc kubenswrapper[4938]: I1122 10:38:09.568052 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:09 crc kubenswrapper[4938]: I1122 10:38:09.568065 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.054081 4938 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="7s"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.054658 4938 trace.go:236] Trace[1029748768]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 10:37:56.607) (total time: 14447ms):
Nov 22 10:38:11 crc kubenswrapper[4938]: Trace[1029748768]: ---"Objects listed" error: 14447ms (10:38:11.054)
Nov 22 10:38:11 crc kubenswrapper[4938]: Trace[1029748768]: [14.447327664s] [14.447327664s] END
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.054688 4938 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.059966 4938 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.078375 4938 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.078610 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.128475 4938 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.129838 4938 trace.go:236] Trace[1200120761]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 10:37:58.025) (total time: 13103ms):
Nov 22 10:38:11 crc kubenswrapper[4938]: Trace[1200120761]: ---"Objects listed" error: 13103ms (10:38:11.129)
Nov 22 10:38:11 crc kubenswrapper[4938]: Trace[1200120761]: [13.103997897s] [13.103997897s] END
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.129863 4938 trace.go:236] Trace[1191523437]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 10:37:57.723) (total time: 13406ms):
Nov 22 10:38:11 crc kubenswrapper[4938]: Trace[1191523437]: ---"Objects listed" error: 13406ms (10:38:11.129)
Nov 22 10:38:11 crc kubenswrapper[4938]: Trace[1191523437]: [13.406553971s] [13.406553971s] END
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.129879 4938 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.129869 4938 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.180172 4938 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:35858->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.180225 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:35858->192.168.126.11:17697: read: connection reset by peer"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.194112 4938 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.194157 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.315616 4938 apiserver.go:52] "Watching apiserver"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.329837 4938 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.330124 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"]
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.330436 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.330538 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.330577 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.330689 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.330736 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.330891 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.331411 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.331446 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.331585 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.332049 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.332695 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.332854 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.333128 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.333527 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.333671 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.333797 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.333989 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.334135 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.354245 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.369722 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.370067 4938 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430351 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430427 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430467 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430496 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430530 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430562 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430593 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430623 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430652 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430682 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430712 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430744 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430774 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430783 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430798 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430803 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430840 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430857 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430874 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430889 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430950 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430968 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430981 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.430996 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431010 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431024 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431040 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431055 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431070 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431087 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431102 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431082 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431115 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431180 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431234 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431258 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431271 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431276 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431293 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431308 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431324 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431339 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431354 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431370 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431388 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431401 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431416 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431431 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431446 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431459 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431461 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431474 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431491 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431508 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431522 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431537 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431552 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431566 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431581 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431597 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431614 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431629 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431630 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431646 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431663 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431678 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431694 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431708 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431725 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431740 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431756 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431770 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431785 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431784 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431816 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431800 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431874 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.431894 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432017 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432036 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432072 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432129 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432172 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432204 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432235 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432243 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432268 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432298 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432328 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432360 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432396 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432433 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432464 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432497 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432529 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432562 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432595 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432628 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432660 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432692 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432738 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432803 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432843 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432887 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432982 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433034 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433083 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433137 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433188 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433230 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433267 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433297 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433329 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433361 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433392 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433425 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName:
\"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433457 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433490 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433523 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433557 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433589 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433624 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433657 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433692 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433727 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433759 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433795 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433827 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433857 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433892 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433969 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434050 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434093 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434126 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434159 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 
10:38:11.434193 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434226 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434261 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434294 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434332 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434364 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434396 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434428 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434461 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434496 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 10:38:11 
crc kubenswrapper[4938]: I1122 10:38:11.434538 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434573 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434605 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434639 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434673 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434704 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434738 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434774 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434806 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434841 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 
22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434879 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434944 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434998 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435034 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435136 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435173 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435217 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435262 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435424 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435470 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod 
\"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435504 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435540 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435575 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435616 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435649 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435686 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435729 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435780 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435825 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435860 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: 
\"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435896 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435984 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436029 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436063 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436100 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436136 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436175 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436209 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436246 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436281 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: 
\"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436316 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436358 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436397 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436431 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436467 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436519 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436573 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436624 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436659 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436694 4938 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436736 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436787 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436839 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436879 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436956 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437008 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437055 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437092 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437132 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437168 4938 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437208 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437244 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437281 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437343 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437388 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437436 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437478 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437514 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437554 4938 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437593 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437629 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437671 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437708 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437746 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437781 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437817 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437855 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: 
\"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437956 4938 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437989 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.438011 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.438031 4938 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.438084 4938 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.438111 4938 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.438132 4938 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.438155 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.438176 4938 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.438197 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432394 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432441 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432534 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432638 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432946 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.432901 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433202 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433221 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433234 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433418 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433439 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433449 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433586 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433762 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433820 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.433893 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434143 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). 
InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434183 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434417 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434451 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434471 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434640 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434671 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434701 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434839 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.434905 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.435023 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436150 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436359 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436370 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436606 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436668 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.436814 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437025 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437048 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437321 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437345 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437398 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437519 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437666 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437738 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437741 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437940 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.437995 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.438192 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.438361 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.438549 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.438716 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.439224 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.439398 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.439579 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.439822 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.439889 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.440070 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.440310 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.440612 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.440685 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.440825 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.441131 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.441151 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.441428 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.441627 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.441836 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.442189 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.442233 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.442324 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.442435 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.442601 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.442748 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.442809 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.442819 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.442931 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.445141 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.445334 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.445760 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.445885 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.445979 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.446039 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.446245 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.446507 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.446538 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.446579 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.446669 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.446789 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.446916 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.447020 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.447144 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.447394 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.447549 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.447651 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.447706 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.447828 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.447965 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.447991 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.448011 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.448029 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.448137 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.448377 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.448582 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.448749 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.448866 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.448944 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.448882 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.450130 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.450277 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:38:11.950256195 +0000 UTC m=+24.418093614 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.450318 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.450344 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.450622 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.450758 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.450964 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451066 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451100 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451138 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451207 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451310 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451393 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451405 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451428 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451443 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451582 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451709 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451899 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.451982 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.452131 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.452212 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.452210 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.452405 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.452536 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.452984 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.453135 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.453167 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.453249 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.453333 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.453553 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.453632 4938 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.453740 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:11.953710732 +0000 UTC m=+24.421548221 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.453718 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.453958 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.454004 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.454041 4938 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.454161 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.454968 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.455207 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.455466 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.455509 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.455551 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.455588 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:11.955569458 +0000 UTC m=+24.423406857 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.455751 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.456094 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.456294 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.456871 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.457284 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.457433 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.457597 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.457680 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.457684 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.457749 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.457897 4938 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.458002 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.458133 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.458397 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.458621 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.458770 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.459184 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.459302 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.459335 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.459471 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.459518 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.459605 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.459619 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.460082 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.460099 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.460266 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.460347 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.460978 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.461005 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.461891 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.461919 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.462028 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.462170 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.462295 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.462471 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.462605 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.462998 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.464885 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.465391 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.475160 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.478569 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.484047 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.538876 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.538972 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539024 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539038 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539049 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539058 4938 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539067 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539078 4938 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539087 4938 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539095 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: 
I1122 10:38:11.539102 4938 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539110 4938 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539119 4938 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539128 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539140 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539151 4938 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539192 4938 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539203 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539213 4938 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539224 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539234 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539244 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539252 4938 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539262 4938 
reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539269 4938 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539277 4938 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539286 4938 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539296 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539304 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539311 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539319 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539327 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539335 4938 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539342 4938 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539352 4938 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539364 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539375 4938 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539385 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539393 4938 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539401 4938 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539409 4938 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539419 4938 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539430 4938 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539439 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539447 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539456 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539466 4938 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539474 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539482 4938 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 
10:38:11.539490 4938 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539498 4938 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539511 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539519 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539528 4938 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539536 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539543 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539552 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539559 4938 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539568 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539576 4938 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539584 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539591 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539599 4938 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539607 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539615 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539623 4938 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539631 4938 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539640 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539648 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539656 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539664 4938 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539672 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539681 4938 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539689 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539697 4938 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539705 4938 
reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539714 4938 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539722 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539730 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539738 4938 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539746 4938 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539755 4938 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539763 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539774 4938 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539782 4938 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539789 4938 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539797 4938 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539805 4938 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 
10:38:11.539812 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539820 4938 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539828 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539836 4938 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539843 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539850 4938 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539857 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539865 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539874 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539882 4938 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539890 4938 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539898 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539906 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539916 4938 
reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539937 4938 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539946 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539954 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539962 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539969 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539978 4938 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539986 4938 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.539994 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540001 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540010 4938 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540017 4938 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540025 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc 
kubenswrapper[4938]: I1122 10:38:11.540033 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540042 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540050 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540057 4938 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540065 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540075 4938 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540082 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540090 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540098 4938 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540106 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540113 4938 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540121 4938 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540129 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc 
kubenswrapper[4938]: I1122 10:38:11.540137 4938 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540144 4938 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540152 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540160 4938 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540168 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540176 4938 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540183 4938 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540191 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540199 4938 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540207 4938 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540214 4938 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540222 4938 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540230 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: 
I1122 10:38:11.540238 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540247 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540254 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540263 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540271 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540278 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540286 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540296 4938 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540303 4938 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540311 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540318 4938 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540327 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540335 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540342 4938 
reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540349 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540357 4938 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540365 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540372 4938 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540381 4938 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540389 4938 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540396 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540404 4938 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540412 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540420 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540428 4938 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540437 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540445 4938 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540453 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540461 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540469 4938 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540477 4938 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540486 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540494 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540501 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540509 4938 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540517 4938 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540525 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540532 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540540 4938 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540549 4938 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540557 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540565 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540573 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540582 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540590 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540631 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.540679 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.675828 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.676341 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.685656 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.697468 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.705757 4938 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.705836 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.709851 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.709882 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.709894 4938 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.709971 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:12.209954506 +0000 UTC m=+24.677791905 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.723434 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.723707 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.724082 4938 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:11 crc kubenswrapper[4938]: E1122 10:38:11.724286 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:12.224255105 +0000 UTC m=+24.692092534 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.724591 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.728239 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.734522 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.735817 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.741586 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.741889 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.747226 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.757865 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-xdnvn"] Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.758159 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-xdnvn" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.764000 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.764394 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.765562 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.767249 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.771666 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.784181 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.801821 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.816132 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.828133 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.837726 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.842687 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/13ff9c31-ec9f-417b-8237-65660901d3ad-hosts-file\") pod \"node-resolver-xdnvn\" (UID: \"13ff9c31-ec9f-417b-8237-65660901d3ad\") " pod="openshift-dns/node-resolver-xdnvn" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.842894 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpp5q\" (UniqueName: \"kubernetes.io/projected/13ff9c31-ec9f-417b-8237-65660901d3ad-kube-api-access-kpp5q\") pod \"node-resolver-xdnvn\" (UID: \"13ff9c31-ec9f-417b-8237-65660901d3ad\") " pod="openshift-dns/node-resolver-xdnvn" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.844639 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.858152 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d7532a6c9b6ea41faa073cca8ec1f0bec26f02dbde758bf19313728a4fcba66\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:37:54Z\\\",\\\"message\\\":\\\"W1122 10:37:53.516710 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 
10:37:53.517126 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763807873 cert, and key in /tmp/serving-cert-4121852179/serving-signer.crt, /tmp/serving-cert-4121852179/serving-signer.key\\\\nI1122 10:37:53.755053 1 observer_polling.go:159] Starting file observer\\\\nW1122 10:37:53.759184 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 10:37:53.759343 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:37:53.760019 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4121852179/tls.crt::/tmp/serving-cert-4121852179/tls.key\\\\\\\"\\\\nF1122 10:37:53.975428 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\
" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.869596 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.882131 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready 
status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.890194 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.942367 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.943340 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/13ff9c31-ec9f-417b-8237-65660901d3ad-hosts-file\") pod \"node-resolver-xdnvn\" (UID: \"13ff9c31-ec9f-417b-8237-65660901d3ad\") " pod="openshift-dns/node-resolver-xdnvn" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.943389 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpp5q\" (UniqueName: \"kubernetes.io/projected/13ff9c31-ec9f-417b-8237-65660901d3ad-kube-api-access-kpp5q\") pod \"node-resolver-xdnvn\" (UID: \"13ff9c31-ec9f-417b-8237-65660901d3ad\") " pod="openshift-dns/node-resolver-xdnvn" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.943690 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/13ff9c31-ec9f-417b-8237-65660901d3ad-hosts-file\") pod \"node-resolver-xdnvn\" (UID: \"13ff9c31-ec9f-417b-8237-65660901d3ad\") " pod="openshift-dns/node-resolver-xdnvn" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.949362 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.955645 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:38:11 crc kubenswrapper[4938]: I1122 10:38:11.961370 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpp5q\" (UniqueName: \"kubernetes.io/projected/13ff9c31-ec9f-417b-8237-65660901d3ad-kube-api-access-kpp5q\") pod \"node-resolver-xdnvn\" (UID: \"13ff9c31-ec9f-417b-8237-65660901d3ad\") " pod="openshift-dns/node-resolver-xdnvn" Nov 22 10:38:11 crc kubenswrapper[4938]: W1122 10:38:11.964286 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-f8ab6b1a868137af8603a9076d04947a4ca3c306f33d979e9c0919bd61ec8ce9 WatchSource:0}: Error finding container f8ab6b1a868137af8603a9076d04947a4ca3c306f33d979e9c0919bd61ec8ce9: Status 404 returned error can't find the container with id f8ab6b1a868137af8603a9076d04947a4ca3c306f33d979e9c0919bd61ec8ce9 Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.045698 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.045794 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.045837 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.045967 4938 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.046025 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:13.046007621 +0000 UTC m=+25.513845020 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.046094 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:38:13.046086903 +0000 UTC m=+25.513924302 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.046129 4938 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.046148 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:13.046143084 +0000 UTC m=+25.513980483 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.068357 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-xdnvn" Nov 22 10:38:12 crc kubenswrapper[4938]: W1122 10:38:12.098441 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13ff9c31_ec9f_417b_8237_65660901d3ad.slice/crio-38ef43cbfa72771434c1fa37c8235f88ada5659f903b475449d88fc279ef02bc WatchSource:0}: Error finding container 38ef43cbfa72771434c1fa37c8235f88ada5659f903b475449d88fc279ef02bc: Status 404 returned error can't find the container with id 38ef43cbfa72771434c1fa37c8235f88ada5659f903b475449d88fc279ef02bc Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.248591 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.248640 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.248779 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.248796 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.248807 4938 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.248808 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.248844 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.248859 4938 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.248847 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-11-22 10:38:13.248834835 +0000 UTC m=+25.716672234 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.248960 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:13.248941138 +0000 UTC m=+25.716778547 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.446813 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.446986 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.451667 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.452201 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.453379 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.453980 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.454886 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.455364 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.455936 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.456827 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.457417 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.458298 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.458833 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.459881 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.460433 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.460963 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.461909 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.462412 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.463298 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.463722 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.464252 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.465170 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.465598 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.466967 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.467379 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.468488 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.468989 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.469550 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.470562 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.471024 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" 
path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.471881 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.472355 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.473293 4938 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.473391 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.474927 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.475770 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.476215 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.477651 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.478382 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.479194 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.479799 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.480791 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.481331 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.482242 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" 
path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.482847 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.483868 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.484327 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.485603 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.486091 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.487143 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.487588 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.488447 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.488888 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.489843 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.490413 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.490883 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.575198 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-xdnvn" event={"ID":"13ff9c31-ec9f-417b-8237-65660901d3ad","Type":"ContainerStarted","Data":"38ef43cbfa72771434c1fa37c8235f88ada5659f903b475449d88fc279ef02bc"} Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.576190 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"f8ab6b1a868137af8603a9076d04947a4ca3c306f33d979e9c0919bd61ec8ce9"} Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.577209 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"704ca4e3b8e2dfddf05fdc6c793ee0c50ec3f121e5b57b70537164d36e23e15f"} Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.578403 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"372c39b98685356f02526398e274a44411ed080a337bef89c8639ef03b340e8b"} Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.580524 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.581250 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.582854 4938 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683" exitCode=255 Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.582883 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683"} Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.582966 4938 scope.go:117] "RemoveContainer" containerID="7d7532a6c9b6ea41faa073cca8ec1f0bec26f02dbde758bf19313728a4fcba66" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.583876 4938 scope.go:117] "RemoveContainer" containerID="d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683" Nov 22 10:38:12 crc kubenswrapper[4938]: E1122 10:38:12.584068 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.607416 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://7d7532a6c9b6ea41faa073cca8ec1f0bec26f02dbde758bf19313728a4fcba66\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:37:54Z\\\",\\\"message\\\":\\\"W1122 10:37:53.516710 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 10:37:53.517126 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763807873 cert, and key in /tmp/serving-cert-4121852179/serving-signer.crt, /tmp/serving-cert-4121852179/serving-signer.key\\\\nI1122 10:37:53.755053 1 observer_polling.go:159] Starting file observer\\\\nW1122 10:37:53.759184 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 10:37:53.759343 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:37:53.760019 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4121852179/tls.crt::/tmp/serving-cert-4121852179/tls.key\\\\\\\"\\\\nF1122 10:37:53.975428 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.619261 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.631801 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.641070 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.651092 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.662163 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.673702 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.679369 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:12 crc 
kubenswrapper[4938]: I1122 10:38:12.955365 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-6kr67"] Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.956242 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.963889 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.963986 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.964025 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-8sphc"] Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.965102 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.968452 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-slzgc"] Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.968747 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.968869 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.968987 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.969029 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.969100 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.969133 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-j67hq"] Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.969376 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-j67hq" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.969746 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.970006 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.970276 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.970310 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.970331 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.971341 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.975264 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.975475 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.975493 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.975481 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.975524 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.975493 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.975478 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 22 10:38:12 crc kubenswrapper[4938]: I1122 10:38:12.991589 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.020541 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.027019 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc 
kubenswrapper[4938]: I1122 10:38:13.036379 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d7532a6c9b6ea41faa073cca8ec1f0bec26f02dbde758bf19313728a4fcba66\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:37:54Z\\\",\\\"message\\\":\\\"W1122 10:37:53.516710 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 10:37:53.517126 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763807873 cert, and key in /tmp/serving-cert-4121852179/serving-signer.crt, /tmp/serving-cert-4121852179/serving-signer.key\\\\nI1122 10:37:53.755053 1 observer_polling.go:159] Starting file observer\\\\nW1122 10:37:53.759184 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 10:37:53.759343 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:37:53.760019 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4121852179/tls.crt::/tmp/serving-cert-4121852179/tls.key\\\\\\\"\\\\nF1122 10:37:53.975428 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" 
limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.045617 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.055626 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.055807 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.056397 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:38:15.056364081 +0000 UTC m=+27.524201490 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.056469 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a430bfdd-4d1d-4bda-82ec-884f775af556-cnibin\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.056538 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-system-cni-dir\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.056571 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-var-lib-kubelet\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.056611 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-etc-openvswitch\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.056638 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-node-log\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.056658 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-var-lib-openvswitch\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.056682 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-kubelet\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.056842 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-run-netns\") pod \"ovnkube-node-8sphc\" 
(UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.056953 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a430bfdd-4d1d-4bda-82ec-884f775af556-os-release\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057016 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/671da3f6-347d-4f86-890d-155ef844b1f6-multus-daemon-config\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057067 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-cni-netd\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057110 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057166 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e2b98cee-eb10-409f-93b6-153856457611-proxy-tls\") pod \"machine-config-daemon-slzgc\" (UID: \"e2b98cee-eb10-409f-93b6-153856457611\") " pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057202 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-etc-kubernetes\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057233 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-env-overrides\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057278 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a430bfdd-4d1d-4bda-82ec-884f775af556-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057351 4938 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-run-multus-certs\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057386 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e2b98cee-eb10-409f-93b6-153856457611-mcd-auth-proxy-config\") pod \"machine-config-daemon-slzgc\" (UID: \"e2b98cee-eb10-409f-93b6-153856457611\") " pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057412 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-multus-socket-dir-parent\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057437 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-run-k8s-cni-cncf-io\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057501 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-var-lib-cni-bin\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057543 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-systemd-units\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057587 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovn-node-metrics-cert\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057651 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a430bfdd-4d1d-4bda-82ec-884f775af556-cni-binary-copy\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057694 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgw8t\" (UniqueName: \"kubernetes.io/projected/a430bfdd-4d1d-4bda-82ec-884f775af556-kube-api-access-sgw8t\") pod \"multus-additional-cni-plugins-6kr67\" (UID: 
\"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057727 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovnkube-script-lib\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057760 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kz2bx\" (UniqueName: \"kubernetes.io/projected/8b4b8200-248f-47ae-bed3-cbfd4598b99d-kube-api-access-kz2bx\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057794 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zxdk\" (UniqueName: \"kubernetes.io/projected/671da3f6-347d-4f86-890d-155ef844b1f6-kube-api-access-5zxdk\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057835 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-log-socket\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057859 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovnkube-config\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057902 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057956 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a430bfdd-4d1d-4bda-82ec-884f775af556-system-cni-dir\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.057986 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-hostroot\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058020 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-openvswitch\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058041 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-slash\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058069 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-multus-conf-dir\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058104 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-cnibin\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058138 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-run-ovn-kubernetes\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058164 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-cni-bin\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058193 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058222 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-ovn\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058251 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a430bfdd-4d1d-4bda-82ec-884f775af556-tuning-conf-dir\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058282 4938 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-run-netns\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058306 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-systemd\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058333 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/e2b98cee-eb10-409f-93b6-153856457611-rootfs\") pod \"machine-config-daemon-slzgc\" (UID: \"e2b98cee-eb10-409f-93b6-153856457611\") " pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058362 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnjbh\" (UniqueName: \"kubernetes.io/projected/e2b98cee-eb10-409f-93b6-153856457611-kube-api-access-mnjbh\") pod \"machine-config-daemon-slzgc\" (UID: \"e2b98cee-eb10-409f-93b6-153856457611\") " pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058387 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-multus-cni-dir\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058412 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-os-release\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058429 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/671da3f6-347d-4f86-890d-155ef844b1f6-cni-binary-copy\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.058453 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-var-lib-cni-multus\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.059140 4938 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.059330 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf 
podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:15.059304794 +0000 UTC m=+27.527142193 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.059362 4938 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.059469 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:15.059444938 +0000 UTC m=+27.527282337 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.069471 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.076620 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.087202 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.098240 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.107194 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.116706 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.124358 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.133517 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d7532a6c9b6ea41faa073cca8ec1f0bec26f02dbde758bf19313728a4fcba66\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:37:54Z\\\",\\\"message\\\":\\\"W1122 10:37:53.516710 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 10:37:53.517126 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763807873 cert, and key in /tmp/serving-cert-4121852179/serving-signer.crt, /tmp/serving-cert-4121852179/serving-signer.key\\\\nI1122 10:37:53.755053 1 observer_polling.go:159] Starting file observer\\\\nW1122 10:37:53.759184 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 10:37:53.759343 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:37:53.760019 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4121852179/tls.crt::/tmp/serving-cert-4121852179/tls.key\\\\\\\"\\\\nF1122 10:37:53.975428 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized 
mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.142810 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.151407 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159226 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kz2bx\" (UniqueName: \"kubernetes.io/projected/8b4b8200-248f-47ae-bed3-cbfd4598b99d-kube-api-access-kz2bx\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159264 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a430bfdd-4d1d-4bda-82ec-884f775af556-cni-binary-copy\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc 
kubenswrapper[4938]: I1122 10:38:13.159285 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgw8t\" (UniqueName: \"kubernetes.io/projected/a430bfdd-4d1d-4bda-82ec-884f775af556-kube-api-access-sgw8t\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159312 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovnkube-script-lib\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159331 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zxdk\" (UniqueName: \"kubernetes.io/projected/671da3f6-347d-4f86-890d-155ef844b1f6-kube-api-access-5zxdk\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159347 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-log-socket\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159364 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovnkube-config\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159388 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a430bfdd-4d1d-4bda-82ec-884f775af556-system-cni-dir\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159438 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-hostroot\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159453 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-openvswitch\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159470 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-multus-conf-dir\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159486 4938 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-slash\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159501 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-cnibin\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159518 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-run-ovn-kubernetes\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159557 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-ovn\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159573 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-cni-bin\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159589 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a430bfdd-4d1d-4bda-82ec-884f775af556-tuning-conf-dir\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159604 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-run-netns\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159620 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-systemd\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159630 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-log-socket\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159636 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: 
\"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-var-lib-cni-multus\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159673 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-var-lib-cni-multus\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159688 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/e2b98cee-eb10-409f-93b6-153856457611-rootfs\") pod \"machine-config-daemon-slzgc\" (UID: \"e2b98cee-eb10-409f-93b6-153856457611\") " pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159707 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-multus-conf-dir\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159708 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnjbh\" (UniqueName: \"kubernetes.io/projected/e2b98cee-eb10-409f-93b6-153856457611-kube-api-access-mnjbh\") pod \"machine-config-daemon-slzgc\" (UID: \"e2b98cee-eb10-409f-93b6-153856457611\") " pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159735 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-multus-cni-dir\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159750 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-os-release\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159746 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-cni-bin\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159767 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/671da3f6-347d-4f86-890d-155ef844b1f6-cni-binary-copy\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159823 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-var-lib-kubelet\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " 
pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159864 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a430bfdd-4d1d-4bda-82ec-884f775af556-cnibin\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159880 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-system-cni-dir\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159898 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-etc-openvswitch\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159932 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-node-log\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159951 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-var-lib-openvswitch\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159969 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a430bfdd-4d1d-4bda-82ec-884f775af556-os-release\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159985 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/671da3f6-347d-4f86-890d-155ef844b1f6-multus-daemon-config\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160000 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-kubelet\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160015 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-run-netns\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160032 
4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-cni-netd\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160065 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a430bfdd-4d1d-4bda-82ec-884f775af556-cni-binary-copy\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160081 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e2b98cee-eb10-409f-93b6-153856457611-proxy-tls\") pod \"machine-config-daemon-slzgc\" (UID: \"e2b98cee-eb10-409f-93b6-153856457611\") " pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160107 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160130 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a430bfdd-4d1d-4bda-82ec-884f775af556-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160148 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-run-multus-certs\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160163 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-etc-kubernetes\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160179 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-env-overrides\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160183 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovnkube-script-lib\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160195 4938 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovn-node-metrics-cert\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160231 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e2b98cee-eb10-409f-93b6-153856457611-mcd-auth-proxy-config\") pod \"machine-config-daemon-slzgc\" (UID: \"e2b98cee-eb10-409f-93b6-153856457611\") " pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160252 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-multus-socket-dir-parent\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160270 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-run-k8s-cni-cncf-io\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160329 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-var-lib-cni-bin\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160345 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-systemd-units\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160379 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovnkube-config\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160404 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-systemd-units\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160432 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a430bfdd-4d1d-4bda-82ec-884f775af556-system-cni-dir\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160464 4938 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-hostroot\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160440 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/671da3f6-347d-4f86-890d-155ef844b1f6-cni-binary-copy\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160498 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/e2b98cee-eb10-409f-93b6-153856457611-rootfs\") pod \"machine-config-daemon-slzgc\" (UID: \"e2b98cee-eb10-409f-93b6-153856457611\") " pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160522 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-systemd\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160545 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-run-netns\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.159602 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-openvswitch\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160696 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-multus-cni-dir\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160731 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-multus-socket-dir-parent\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160754 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-run-multus-certs\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160751 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-run-k8s-cni-cncf-io\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 
10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160778 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160818 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-etc-kubernetes\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160848 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-var-lib-cni-bin\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160877 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e2b98cee-eb10-409f-93b6-153856457611-mcd-auth-proxy-config\") pod \"machine-config-daemon-slzgc\" (UID: \"e2b98cee-eb10-409f-93b6-153856457611\") " pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160917 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-slash\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160937 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-cnibin\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160880 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-etc-openvswitch\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160962 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-run-ovn-kubernetes\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160970 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-host-var-lib-kubelet\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160987 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"cnibin\" (UniqueName: \"kubernetes.io/host-path/a430bfdd-4d1d-4bda-82ec-884f775af556-cnibin\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.160993 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-ovn\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.161077 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-system-cni-dir\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.161179 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/671da3f6-347d-4f86-890d-155ef844b1f6-os-release\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.161516 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-env-overrides\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.161548 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/671da3f6-347d-4f86-890d-155ef844b1f6-multus-daemon-config\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.161552 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-run-netns\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.161574 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-kubelet\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.161592 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-node-log\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.161609 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-cni-netd\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 
10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.161615 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-var-lib-openvswitch\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.161656 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a430bfdd-4d1d-4bda-82ec-884f775af556-os-release\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.162446 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a430bfdd-4d1d-4bda-82ec-884f775af556-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.163452 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.176443 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e2b98cee-eb10-409f-93b6-153856457611-proxy-tls\") pod \"machine-config-daemon-slzgc\" (UID: \"e2b98cee-eb10-409f-93b6-153856457611\") " pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.176775 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovn-node-metrics-cert\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.184535 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zxdk\" (UniqueName: \"kubernetes.io/projected/671da3f6-347d-4f86-890d-155ef844b1f6-kube-api-access-5zxdk\") pod \"multus-j67hq\" (UID: \"671da3f6-347d-4f86-890d-155ef844b1f6\") " pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.184550 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgw8t\" (UniqueName: \"kubernetes.io/projected/a430bfdd-4d1d-4bda-82ec-884f775af556-kube-api-access-sgw8t\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.185712 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.185974 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kz2bx\" (UniqueName: \"kubernetes.io/projected/8b4b8200-248f-47ae-bed3-cbfd4598b99d-kube-api-access-kz2bx\") pod \"ovnkube-node-8sphc\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.191673 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnjbh\" (UniqueName: \"kubernetes.io/projected/e2b98cee-eb10-409f-93b6-153856457611-kube-api-access-mnjbh\") pod \"machine-config-daemon-slzgc\" (UID: \"e2b98cee-eb10-409f-93b6-153856457611\") " pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.210757 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node 
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\
\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\
",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.218999 4938 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.227743 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.261352 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.261449 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.261576 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.261596 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.261631 4938 projected.go:194] Error preparing data for projected 
volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.261683 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:15.261665228 +0000 UTC m=+27.729502627 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.262025 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.262057 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.262068 4938 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.262098 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:15.262089109 +0000 UTC m=+27.729926508 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.277647 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.279705 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a430bfdd-4d1d-4bda-82ec-884f775af556-tuning-conf-dir\") pod \"multus-additional-cni-plugins-6kr67\" (UID: \"a430bfdd-4d1d-4bda-82ec-884f775af556\") " pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.284985 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.292034 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-j67hq" Nov 22 10:38:13 crc kubenswrapper[4938]: W1122 10:38:13.306409 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b4b8200_248f_47ae_bed3_cbfd4598b99d.slice/crio-5f17757e13e746615e0aa52d7c15989432207432d0a56f26b64f9298f7cdb8b3 WatchSource:0}: Error finding container 5f17757e13e746615e0aa52d7c15989432207432d0a56f26b64f9298f7cdb8b3: Status 404 returned error can't find the container with id 5f17757e13e746615e0aa52d7c15989432207432d0a56f26b64f9298f7cdb8b3 Nov 22 10:38:13 crc kubenswrapper[4938]: W1122 10:38:13.313791 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode2b98cee_eb10_409f_93b6_153856457611.slice/crio-14cfaa271b75ccfca054672c9b7608fdcd887a75618639e38928cb830699b2bb WatchSource:0}: Error finding container 14cfaa271b75ccfca054672c9b7608fdcd887a75618639e38928cb830699b2bb: Status 404 returned error can't find the container with id 14cfaa271b75ccfca054672c9b7608fdcd887a75618639e38928cb830699b2bb Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.446605 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.446641 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.446742 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.446809 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.568304 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-6kr67" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.587340 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerID="d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3" exitCode=0 Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.587417 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3"} Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.587491 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"5f17757e13e746615e0aa52d7c15989432207432d0a56f26b64f9298f7cdb8b3"} Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.588904 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j67hq" event={"ID":"671da3f6-347d-4f86-890d-155ef844b1f6","Type":"ContainerStarted","Data":"43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb"} Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.589025 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j67hq" event={"ID":"671da3f6-347d-4f86-890d-155ef844b1f6","Type":"ContainerStarted","Data":"41e2cc7125fa0593f059c63f3401596e377f49cd8d78b0985261839b076d1a39"} Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.591671 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79"} Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.593495 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-xdnvn" event={"ID":"13ff9c31-ec9f-417b-8237-65660901d3ad","Type":"ContainerStarted","Data":"4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53"} Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.595207 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f"} Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.595228 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451"} Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.598019 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f"} Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.605978 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" 
event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"14cfaa271b75ccfca054672c9b7608fdcd887a75618639e38928cb830699b2bb"} Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.606373 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.614611 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.620721 4938 scope.go:117] "RemoveContainer" containerID="d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683" Nov 22 10:38:13 crc kubenswrapper[4938]: E1122 10:38:13.620934 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.621400 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.629110 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.643868 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.651323 
4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.662576 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d7532a6c9b6ea41faa073cca8ec1f0bec26f02dbde758bf19313728a4fcba66\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:37:54Z\\\",\\\"message\\\":\\\"W1122 10:37:53.516710 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 10:37:53.517126 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763807873 cert, and key in /tmp/serving-cert-4121852179/serving-signer.crt, /tmp/serving-cert-4121852179/serving-signer.key\\\\nI1122 10:37:53.755053 1 observer_polling.go:159] Starting file observer\\\\nW1122 10:37:53.759184 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 10:37:53.759343 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:37:53.760019 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4121852179/tls.crt::/tmp/serving-cert-4121852179/tls.key\\\\\\\"\\\\nF1122 10:37:53.975428 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 
only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.671192 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.679361 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.689779 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.698831 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.709176 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.724054 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.732934 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.745228 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.755762 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.766769 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f89
45c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.776867 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.789025 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection 
refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.798370 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.808469 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.826980 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.836685 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.847613 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:13 crc kubenswrapper[4938]: I1122 10:38:13.856141 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.433653 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-8l8nr"] Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.434047 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-8l8nr" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.436264 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.436879 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.437322 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.437483 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.446430 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:14 crc kubenswrapper[4938]: E1122 10:38:14.446571 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.446617 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.458978 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.476895 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.484473 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.498732 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.509805 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.521645 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.530330 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.538360 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.547045 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.554188 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 
crc kubenswrapper[4938]: I1122 10:38:14.568265 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\
\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.575796 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f3e57739-79cd-498f-8e4b-8423b0fb5306-host\") pod \"node-ca-8l8nr\" (UID: \"f3e57739-79cd-498f-8e4b-8423b0fb5306\") " pod="openshift-image-registry/node-ca-8l8nr" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.575850 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9j4sd\" (UniqueName: \"kubernetes.io/projected/f3e57739-79cd-498f-8e4b-8423b0fb5306-kube-api-access-9j4sd\") pod \"node-ca-8l8nr\" (UID: \"f3e57739-79cd-498f-8e4b-8423b0fb5306\") " pod="openshift-image-registry/node-ca-8l8nr" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.575876 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f3e57739-79cd-498f-8e4b-8423b0fb5306-serviceca\") pod \"node-ca-8l8nr\" (UID: \"f3e57739-79cd-498f-8e4b-8423b0fb5306\") " pod="openshift-image-registry/node-ca-8l8nr" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.577109 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.624935 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781"} Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.626073 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" event={"ID":"a430bfdd-4d1d-4bda-82ec-884f775af556","Type":"ContainerStarted","Data":"b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a"} Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.626106 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" event={"ID":"a430bfdd-4d1d-4bda-82ec-884f775af556","Type":"ContainerStarted","Data":"a6a0e50fb858fbd5d1b0e4021626b6775a75ce658f7216b19ea51cee5a400252"} Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.627851 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" 
event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2"} Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.630510 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240"} Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.630552 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7"} Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.637575 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.648012 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc 
kubenswrapper[4938]: I1122 10:38:14.663767 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"Po
dInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.666477 4938 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.671245 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.676612 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9j4sd\" (UniqueName: \"kubernetes.io/projected/f3e57739-79cd-498f-8e4b-8423b0fb5306-kube-api-access-9j4sd\") pod \"node-ca-8l8nr\" (UID: \"f3e57739-79cd-498f-8e4b-8423b0fb5306\") " pod="openshift-image-registry/node-ca-8l8nr" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.676653 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f3e57739-79cd-498f-8e4b-8423b0fb5306-serviceca\") pod \"node-ca-8l8nr\" (UID: \"f3e57739-79cd-498f-8e4b-8423b0fb5306\") " pod="openshift-image-registry/node-ca-8l8nr" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.676683 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f3e57739-79cd-498f-8e4b-8423b0fb5306-host\") pod \"node-ca-8l8nr\" (UID: \"f3e57739-79cd-498f-8e4b-8423b0fb5306\") " pod="openshift-image-registry/node-ca-8l8nr" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.676735 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f3e57739-79cd-498f-8e4b-8423b0fb5306-host\") pod \"node-ca-8l8nr\" (UID: \"f3e57739-79cd-498f-8e4b-8423b0fb5306\") " pod="openshift-image-registry/node-ca-8l8nr" Nov 22 10:38:14 crc 
kubenswrapper[4938]: I1122 10:38:14.677673 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f3e57739-79cd-498f-8e4b-8423b0fb5306-serviceca\") pod \"node-ca-8l8nr\" (UID: \"f3e57739-79cd-498f-8e4b-8423b0fb5306\") " pod="openshift-image-registry/node-ca-8l8nr" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.678973 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.686646 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.692753 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.698298 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9j4sd\" (UniqueName: \"kubernetes.io/projected/f3e57739-79cd-498f-8e4b-8423b0fb5306-kube-api-access-9j4sd\") pod \"node-ca-8l8nr\" (UID: \"f3e57739-79cd-498f-8e4b-8423b0fb5306\") " pod="openshift-image-registry/node-ca-8l8nr" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.703434 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\
\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.712428 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"
name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.717933 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.728802 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha
256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.744798 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.749320 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-8l8nr" Nov 22 10:38:14 crc kubenswrapper[4938]: W1122 10:38:14.765479 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf3e57739_79cd_498f_8e4b_8423b0fb5306.slice/crio-2466bd4242a77876fce1e39ced12d65209f1c8a60483c9ffec4044b0310a3bcd WatchSource:0}: Error finding container 2466bd4242a77876fce1e39ced12d65209f1c8a60483c9ffec4044b0310a3bcd: Status 404 returned error can't find the container with id 2466bd4242a77876fce1e39ced12d65209f1c8a60483c9ffec4044b0310a3bcd Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.766452 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.799780 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.812139 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.826132 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.838727 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.847290 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.853298 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" 
Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.866034 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\
\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.875893 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath
\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.883418 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.898042 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] 
check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.908272 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc kubenswrapper[4938]: I1122 10:38:14.917823 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:14 crc 
kubenswrapper[4938]: I1122 10:38:14.927695 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688
df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.080562 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.081124 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:38:19.081104763 +0000 UTC m=+31.548942162 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.081329 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.081356 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.081462 4938 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.081509 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:19.081500923 +0000 UTC m=+31.549338322 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.081789 4938 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.081820 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:19.081812301 +0000 UTC m=+31.549649700 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.283661 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.283735 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.283811 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.283811 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.283825 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.283833 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.283838 4938 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.283842 4938 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.283881 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:19.283869467 +0000 UTC m=+31.751706866 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.283895 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:19.283888787 +0000 UTC m=+31.751726186 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.446856 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.446887 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.447017 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:15 crc kubenswrapper[4938]: E1122 10:38:15.447207 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.634330 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-8l8nr" event={"ID":"f3e57739-79cd-498f-8e4b-8423b0fb5306","Type":"ContainerStarted","Data":"1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b"} Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.634384 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-8l8nr" event={"ID":"f3e57739-79cd-498f-8e4b-8423b0fb5306","Type":"ContainerStarted","Data":"2466bd4242a77876fce1e39ced12d65209f1c8a60483c9ffec4044b0310a3bcd"} Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.637737 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c"} Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.637772 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a"} Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.639418 4938 generic.go:334] "Generic (PLEG): container finished" podID="a430bfdd-4d1d-4bda-82ec-884f775af556" containerID="b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a" exitCode=0 Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.639443 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" event={"ID":"a430bfdd-4d1d-4bda-82ec-884f775af556","Type":"ContainerDied","Data":"b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a"} Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.652542 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.676930 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.691174 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.706759 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.751676 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.769838 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.788450 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCo
unt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.808743 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\
\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.822775 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"re
adOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.845665 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"202
5-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is 
complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.861119 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.873970 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.887839 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.900227 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.912513 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.924129 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.936346 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.951749 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.975758 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:15 crc kubenswrapper[4938]: I1122 10:38:15.987718 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:16 crc kubenswrapper[4938]: I1122 10:38:16.000670 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:15Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:16 crc kubenswrapper[4938]: I1122 10:38:16.013336 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:16Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:16 crc kubenswrapper[4938]: I1122 10:38:16.028932 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:16Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:16 crc kubenswrapper[4938]: I1122 10:38:16.044137 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/hos
t/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\
\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:16Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:16 crc kubenswrapper[4938]: I1122 10:38:16.055468 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:16Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:16 crc kubenswrapper[4938]: I1122 10:38:16.064599 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:16Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:16 crc kubenswrapper[4938]: I1122 10:38:16.448041 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:16 crc kubenswrapper[4938]: E1122 10:38:16.448153 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:16 crc kubenswrapper[4938]: I1122 10:38:16.648362 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681"} Nov 22 10:38:16 crc kubenswrapper[4938]: I1122 10:38:16.654443 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" event={"ID":"a430bfdd-4d1d-4bda-82ec-884f775af556","Type":"ContainerStarted","Data":"9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a"} Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.022545 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.023587 4938 scope.go:117] "RemoveContainer" containerID="d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683" Nov 22 10:38:17 crc kubenswrapper[4938]: E1122 10:38:17.023838 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.447173 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:17 crc kubenswrapper[4938]: E1122 10:38:17.447398 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.447206 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:17 crc kubenswrapper[4938]: E1122 10:38:17.448150 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.660873 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928"} Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.678057 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.700562 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.732045 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.744948 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.759829 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.782662 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.797684 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.819112 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/hos
t/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restart
Count\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.842946 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.857892 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.877561 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.898504 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:17 crc kubenswrapper[4938]: I1122 10:38:17.915234 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:17Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.060466 4938 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.062216 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.062280 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.062296 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.062417 4938 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.068542 4938 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.068785 4938 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.069816 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.069840 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.069872 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.069886 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.069894 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.086698 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.094021 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:38:18 crc kubenswrapper[4938]: E1122 10:38:18.095134 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.098619 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.100678 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.100708 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.100717 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.100731 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.100743 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.106280 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: E1122 10:38:18.116753 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.120795 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.120835 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.120846 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.120862 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.120871 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.122965 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.133264 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: E1122 10:38:18.135713 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3
dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.139733 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.139781 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.139811 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.139829 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.139840 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.148369 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z 
is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: E1122 10:38:18.149715 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.154186 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.154226 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.154235 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.154250 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.154262 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.157953 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: E1122 10:38:18.166395 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: E1122 10:38:18.166549 4938 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.168219 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.168269 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.168283 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.168298 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.168309 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.169252 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.179663 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.188495 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.201273 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c
0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.215303 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":
\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.224637 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.236859 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 
builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.268255 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.270720 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.270753 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.270761 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.270776 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.270785 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.283683 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.298930 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.309794 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.319375 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.335654 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.344465 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.354057 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.362971 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.371579 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.372739 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.372776 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.372787 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.372804 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.372816 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.382887 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.392369 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.400429 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.411128 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.423571 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.446985 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:18 crc kubenswrapper[4938]: E1122 10:38:18.447097 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.458818 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir
\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 
requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.474062 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.476199 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.476236 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.476247 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.476265 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.476276 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.490654 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.504528 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPat
h\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\
\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.517648 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"h
ost-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.528533 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.539241 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.550237 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.560385 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.572276 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.578519 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.578565 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.578577 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.578593 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.578705 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.582391 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.599958 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.616281 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.629204 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"image
ID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: E1122 10:38:18.650787 4938 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda430bfdd_4d1d_4bda_82ec_884f775af556.slice/crio-conmon-9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a.scope\": RecentStats: unable to find data in memory cache]" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.665649 4938 generic.go:334] "Generic (PLEG): container finished" podID="a430bfdd-4d1d-4bda-82ec-884f775af556" 
containerID="9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a" exitCode=0 Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.673122 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" event={"ID":"a430bfdd-4d1d-4bda-82ec-884f775af556","Type":"ContainerDied","Data":"9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.682658 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.682775 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.682848 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.682996 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.683227 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.688948 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.699870 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.718134 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.734492 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.746099 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.761620 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-rel
ease\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256
:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.772810 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.785725 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.785763 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.785774 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.785789 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.785800 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.786412 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.796003 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.810187 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db
2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.820963 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.834829 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.845408 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.864620 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.887576 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.887611 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.887619 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.887642 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.887651 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.990735 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.990799 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.990811 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.990827 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:18 crc kubenswrapper[4938]: I1122 10:38:18.990839 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:18Z","lastTransitionTime":"2025-11-22T10:38:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.092729 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.093039 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.093048 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.093279 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.093301 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:19Z","lastTransitionTime":"2025-11-22T10:38:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.121450 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.121531 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.121578 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.121671 4938 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.121716 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:27.121704414 +0000 UTC m=+39.589541813 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.121975 4938 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.122000 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:38:27.121980301 +0000 UTC m=+39.589817700 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.122042 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:27.122028482 +0000 UTC m=+39.589865981 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.195960 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.195994 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.196004 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.196020 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.196030 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:19Z","lastTransitionTime":"2025-11-22T10:38:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.299050 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.299118 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.299137 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.299163 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.299182 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:19Z","lastTransitionTime":"2025-11-22T10:38:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.323499 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.323568 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.323642 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.323673 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.323691 4938 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.323766 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:27.323744739 +0000 UTC m=+39.791582158 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.323820 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.323869 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.323883 4938 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.323983 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:27.323961985 +0000 UTC m=+39.791799444 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.402006 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.402313 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.402514 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.402610 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.402685 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:19Z","lastTransitionTime":"2025-11-22T10:38:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.446721 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.446773 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.446861 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:19 crc kubenswrapper[4938]: E1122 10:38:19.447327 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.505600 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.505653 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.505670 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.505694 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.505711 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:19Z","lastTransitionTime":"2025-11-22T10:38:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.608672 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.608738 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.608756 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.608783 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.608799 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:19Z","lastTransitionTime":"2025-11-22T10:38:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.711514 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.711583 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.711605 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.711637 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.711662 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:19Z","lastTransitionTime":"2025-11-22T10:38:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.814734 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.814786 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.814804 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.814829 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.814851 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:19Z","lastTransitionTime":"2025-11-22T10:38:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.918524 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.918573 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.918590 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.918615 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:19 crc kubenswrapper[4938]: I1122 10:38:19.918633 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:19Z","lastTransitionTime":"2025-11-22T10:38:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.021560 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.021635 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.021653 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.021679 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.021712 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:20Z","lastTransitionTime":"2025-11-22T10:38:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.124372 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.124410 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.124418 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.124435 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.124445 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:20Z","lastTransitionTime":"2025-11-22T10:38:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.227617 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.227695 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.227717 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.227748 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.227772 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:20Z","lastTransitionTime":"2025-11-22T10:38:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.331131 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.331185 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.331203 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.331229 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.331250 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:20Z","lastTransitionTime":"2025-11-22T10:38:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.434215 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.434255 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.434268 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.434285 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.434296 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:20Z","lastTransitionTime":"2025-11-22T10:38:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.446805 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:20 crc kubenswrapper[4938]: E1122 10:38:20.447137 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.537619 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.537667 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.537680 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.537699 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.537712 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:20Z","lastTransitionTime":"2025-11-22T10:38:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.640851 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.640959 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.640971 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.640989 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.641001 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:20Z","lastTransitionTime":"2025-11-22T10:38:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.743847 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.743889 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.743901 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.743939 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.743953 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:20Z","lastTransitionTime":"2025-11-22T10:38:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.845967 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.846014 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.846023 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.846036 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.846046 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:20Z","lastTransitionTime":"2025-11-22T10:38:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.948379 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.948421 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.948434 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.948451 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:20 crc kubenswrapper[4938]: I1122 10:38:20.948463 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:20Z","lastTransitionTime":"2025-11-22T10:38:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.050953 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.051060 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.051084 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.051112 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.051133 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:21Z","lastTransitionTime":"2025-11-22T10:38:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.153190 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.153242 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.153253 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.153269 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.153278 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:21Z","lastTransitionTime":"2025-11-22T10:38:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.255625 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.255670 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.255680 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.255700 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.255714 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:21Z","lastTransitionTime":"2025-11-22T10:38:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.357681 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.357924 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.357933 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.357947 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.357957 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:21Z","lastTransitionTime":"2025-11-22T10:38:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.446357 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.446381 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:21 crc kubenswrapper[4938]: E1122 10:38:21.446529 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:21 crc kubenswrapper[4938]: E1122 10:38:21.446620 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.460542 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.460714 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.460781 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.460848 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.460904 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:21Z","lastTransitionTime":"2025-11-22T10:38:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.563944 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.563991 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.564026 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.564049 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.564062 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:21Z","lastTransitionTime":"2025-11-22T10:38:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.666226 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.666261 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.666271 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.666287 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.666299 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:21Z","lastTransitionTime":"2025-11-22T10:38:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.682440 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d"} Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.684802 4938 generic.go:334] "Generic (PLEG): container finished" podID="a430bfdd-4d1d-4bda-82ec-884f775af556" containerID="9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2" exitCode=0 Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.684836 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" event={"ID":"a430bfdd-4d1d-4bda-82ec-884f775af556","Type":"ContainerDied","Data":"9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2"} Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.704296 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.724262 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.737058 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.746306 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.764151 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.768074 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.768109 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.768125 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.768144 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.768156 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:21Z","lastTransitionTime":"2025-11-22T10:38:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.774862 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.787200 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db
2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.797274 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.812287 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.824313 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.837598 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.847551 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.858679 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.870363 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.870511 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.870523 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.870530 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.870542 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.870550 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:21Z","lastTransitionTime":"2025-11-22T10:38:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.974890 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.974958 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.974968 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.974982 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:21 crc kubenswrapper[4938]: I1122 10:38:21.975007 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:21Z","lastTransitionTime":"2025-11-22T10:38:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.079587 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.080010 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.080585 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.080645 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.080658 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:22Z","lastTransitionTime":"2025-11-22T10:38:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.183536 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.183581 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.183591 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.183606 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.183615 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:22Z","lastTransitionTime":"2025-11-22T10:38:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.286077 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.286131 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.286148 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.286170 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.286188 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:22Z","lastTransitionTime":"2025-11-22T10:38:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.388642 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.388707 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.388727 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.388754 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.388772 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:22Z","lastTransitionTime":"2025-11-22T10:38:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.447410 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:22 crc kubenswrapper[4938]: E1122 10:38:22.447649 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.491491 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.491521 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.491530 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.491545 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.491555 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:22Z","lastTransitionTime":"2025-11-22T10:38:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.595043 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.595128 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.595152 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.595181 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.595202 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:22Z","lastTransitionTime":"2025-11-22T10:38:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.697312 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.697386 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.697408 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.697431 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.697449 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:22Z","lastTransitionTime":"2025-11-22T10:38:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.800610 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.800686 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.800709 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.800741 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.800762 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:22Z","lastTransitionTime":"2025-11-22T10:38:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.903955 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.903998 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.904010 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.904028 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:22 crc kubenswrapper[4938]: I1122 10:38:22.904041 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:22Z","lastTransitionTime":"2025-11-22T10:38:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.007787 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.008060 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.008073 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.008089 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.008102 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:23Z","lastTransitionTime":"2025-11-22T10:38:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.111208 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.111251 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.111262 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.111277 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.111289 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:23Z","lastTransitionTime":"2025-11-22T10:38:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.213813 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.213847 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.213871 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.213887 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.213896 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:23Z","lastTransitionTime":"2025-11-22T10:38:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.316268 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.316699 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.316709 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.316727 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.316739 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:23Z","lastTransitionTime":"2025-11-22T10:38:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.418629 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.418665 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.418677 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.418696 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.418709 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:23Z","lastTransitionTime":"2025-11-22T10:38:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.446974 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.446988 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:23 crc kubenswrapper[4938]: E1122 10:38:23.447093 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:23 crc kubenswrapper[4938]: E1122 10:38:23.447185 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.521146 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.521187 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.521200 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.521217 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.521229 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:23Z","lastTransitionTime":"2025-11-22T10:38:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.623846 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.623907 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.623950 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.623976 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.623995 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:23Z","lastTransitionTime":"2025-11-22T10:38:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.696376 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" event={"ID":"a430bfdd-4d1d-4bda-82ec-884f775af556","Type":"ContainerStarted","Data":"ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc"} Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.726608 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.726698 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.726722 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.726747 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.726767 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:23Z","lastTransitionTime":"2025-11-22T10:38:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.830035 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.830097 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.830117 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.830141 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.830157 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:23Z","lastTransitionTime":"2025-11-22T10:38:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.932546 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.932583 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.932600 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.932616 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:23 crc kubenswrapper[4938]: I1122 10:38:23.932629 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:23Z","lastTransitionTime":"2025-11-22T10:38:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.034970 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.035005 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.035013 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.035026 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.035035 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:24Z","lastTransitionTime":"2025-11-22T10:38:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.137483 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.137561 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.137585 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.137615 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.137636 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:24Z","lastTransitionTime":"2025-11-22T10:38:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.240487 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.240525 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.240533 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.240546 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.240556 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:24Z","lastTransitionTime":"2025-11-22T10:38:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.343388 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.343419 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.343428 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.343442 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.343452 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:24Z","lastTransitionTime":"2025-11-22T10:38:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.446745 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.446788 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.446799 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.446823 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.446835 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:24Z","lastTransitionTime":"2025-11-22T10:38:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.447440 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:24 crc kubenswrapper[4938]: E1122 10:38:24.447635 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.549228 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.549267 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.549278 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.549295 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.549305 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:24Z","lastTransitionTime":"2025-11-22T10:38:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.651650 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.652073 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.652090 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.652112 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.652130 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:24Z","lastTransitionTime":"2025-11-22T10:38:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.703406 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.703799 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.703825 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.707489 4938 generic.go:334] "Generic (PLEG): container finished" podID="a430bfdd-4d1d-4bda-82ec-884f775af556" containerID="ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc" exitCode=0 Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.707529 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" event={"ID":"a430bfdd-4d1d-4bda-82ec-884f775af556","Type":"ContainerDied","Data":"ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.723657 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] 
check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.746025 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.751260 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.754637 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.754690 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.754714 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.754742 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.754766 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:24Z","lastTransitionTime":"2025-11-22T10:38:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.762219 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.775632 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",
\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.787323 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath
\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.797653 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"k
ube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.811301 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.822882 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.835520 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.850138 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.857284 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.857323 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.857335 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.857352 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.857363 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:24Z","lastTransitionTime":"2025-11-22T10:38:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.862270 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.874293 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.889461 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\
",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log
-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\
\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.901042 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.913165 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.924575 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.937068 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.947461 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8
s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.955898 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.958853 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.958882 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.958890 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.958904 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.959125 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:24Z","lastTransitionTime":"2025-11-22T10:38:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.968868 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.979315 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.988406 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:24 crc kubenswrapper[4938]: I1122 10:38:24.999825 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:24Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.010740 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.026480 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-sock
et\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mou
ntPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.036496 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.047602 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db
2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.056776 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.061177 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.061193 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.061200 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.061212 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.061221 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:25Z","lastTransitionTime":"2025-11-22T10:38:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.163442 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.163489 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.163498 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.163512 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.163523 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:25Z","lastTransitionTime":"2025-11-22T10:38:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.215081 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx"] Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.215714 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.224039 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.225285 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.244313 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.256481 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8
s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.266103 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.266169 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.266186 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.266212 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.266230 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:25Z","lastTransitionTime":"2025-11-22T10:38:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.267049 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.279455 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.289633 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.301882 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.315433 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.329607 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.348315 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"im
ageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.361097 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.368408 4938 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.368461 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.368477 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.368503 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.368520 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:25Z","lastTransitionTime":"2025-11-22T10:38:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.377810 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.386124 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/db6a2428-0ce2-4754-8876-b95a9470a769-env-overrides\") pod \"ovnkube-control-plane-749d76644c-ngpcx\" (UID: \"db6a2428-0ce2-4754-8876-b95a9470a769\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.386165 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/db6a2428-0ce2-4754-8876-b95a9470a769-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-ngpcx\" (UID: \"db6a2428-0ce2-4754-8876-b95a9470a769\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.386200 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/db6a2428-0ce2-4754-8876-b95a9470a769-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-ngpcx\" (UID: \"db6a2428-0ce2-4754-8876-b95a9470a769\") 
" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.386244 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r75cw\" (UniqueName: \"kubernetes.io/projected/db6a2428-0ce2-4754-8876-b95a9470a769-kube-api-access-r75cw\") pod \"ovnkube-control-plane-749d76644c-ngpcx\" (UID: \"db6a2428-0ce2-4754-8876-b95a9470a769\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.394083 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crc
ont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.404588 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the 
pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.419362 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.430936 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.447149 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.447186 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:25 crc kubenswrapper[4938]: E1122 10:38:25.447247 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:25 crc kubenswrapper[4938]: E1122 10:38:25.447316 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.471546 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.471587 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.471599 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.471616 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.471627 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:25Z","lastTransitionTime":"2025-11-22T10:38:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.487409 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/db6a2428-0ce2-4754-8876-b95a9470a769-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-ngpcx\" (UID: \"db6a2428-0ce2-4754-8876-b95a9470a769\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.487441 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/db6a2428-0ce2-4754-8876-b95a9470a769-env-overrides\") pod \"ovnkube-control-plane-749d76644c-ngpcx\" (UID: \"db6a2428-0ce2-4754-8876-b95a9470a769\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.487479 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/db6a2428-0ce2-4754-8876-b95a9470a769-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-ngpcx\" (UID: \"db6a2428-0ce2-4754-8876-b95a9470a769\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.487521 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r75cw\" (UniqueName: \"kubernetes.io/projected/db6a2428-0ce2-4754-8876-b95a9470a769-kube-api-access-r75cw\") pod \"ovnkube-control-plane-749d76644c-ngpcx\" (UID: \"db6a2428-0ce2-4754-8876-b95a9470a769\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.488209 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/db6a2428-0ce2-4754-8876-b95a9470a769-env-overrides\") pod \"ovnkube-control-plane-749d76644c-ngpcx\" (UID: \"db6a2428-0ce2-4754-8876-b95a9470a769\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.488954 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/db6a2428-0ce2-4754-8876-b95a9470a769-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-ngpcx\" (UID: \"db6a2428-0ce2-4754-8876-b95a9470a769\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.496558 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/db6a2428-0ce2-4754-8876-b95a9470a769-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-ngpcx\" (UID: \"db6a2428-0ce2-4754-8876-b95a9470a769\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.513457 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r75cw\" (UniqueName: \"kubernetes.io/projected/db6a2428-0ce2-4754-8876-b95a9470a769-kube-api-access-r75cw\") pod \"ovnkube-control-plane-749d76644c-ngpcx\" (UID: \"db6a2428-0ce2-4754-8876-b95a9470a769\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.532588 4938 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" Nov 22 10:38:25 crc kubenswrapper[4938]: W1122 10:38:25.544251 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb6a2428_0ce2_4754_8876_b95a9470a769.slice/crio-79e434adae888ab62d40118b50b470d6307982e11711711584f56aad2224d301 WatchSource:0}: Error finding container 79e434adae888ab62d40118b50b470d6307982e11711711584f56aad2224d301: Status 404 returned error can't find the container with id 79e434adae888ab62d40118b50b470d6307982e11711711584f56aad2224d301 Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.574321 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.574359 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.574369 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.574384 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.574395 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:25Z","lastTransitionTime":"2025-11-22T10:38:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.676303 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.676331 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.676339 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.676353 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.676363 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:25Z","lastTransitionTime":"2025-11-22T10:38:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.711269 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" event={"ID":"db6a2428-0ce2-4754-8876-b95a9470a769","Type":"ContainerStarted","Data":"79e434adae888ab62d40118b50b470d6307982e11711711584f56aad2224d301"} Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.714596 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" event={"ID":"a430bfdd-4d1d-4bda-82ec-884f775af556","Type":"ContainerStarted","Data":"584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012"} Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.715113 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.774494 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.779330 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.779366 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.779378 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.779403 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.779416 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:25Z","lastTransitionTime":"2025-11-22T10:38:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.789946 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.802548 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.814978 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.823168 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.838792 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.847251 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.856751 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.866061 4938 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.876697 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.881953 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.881994 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.882007 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.882024 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.882032 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:25Z","lastTransitionTime":"2025-11-22T10:38:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.887800 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.897212 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.916313 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.930196 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8
s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.939433 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-s7w5f"] Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.939513 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.940860 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:25 crc kubenswrapper[4938]: E1122 10:38:25.941111 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.953986 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.970472 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.980417 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.983780 4938 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.983818 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.983830 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.983844 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.983855 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:25Z","lastTransitionTime":"2025-11-22T10:38:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:25 crc kubenswrapper[4938]: I1122 10:38:25.993192 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:25Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.005617 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.017036 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.029124 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.038799 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.055146 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-relea
se\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":
{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.066496 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.075941 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.085726 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.085771 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.085785 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:26 crc 
kubenswrapper[4938]: I1122 10:38:26.085802 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.085813 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:26Z","lastTransitionTime":"2025-11-22T10:38:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.086105 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.093301 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5qtx\" (UniqueName: \"kubernetes.io/projected/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-kube-api-access-m5qtx\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.093342 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.098500 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] 
check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.108549 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.119174 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.132096 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.144090 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:26Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.187839 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.187880 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.187893 4938 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.187925 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.187938 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:26Z","lastTransitionTime":"2025-11-22T10:38:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.194357 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5qtx\" (UniqueName: \"kubernetes.io/projected/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-kube-api-access-m5qtx\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.194391 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:26 crc kubenswrapper[4938]: E1122 10:38:26.194486 4938 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:38:26 crc kubenswrapper[4938]: E1122 10:38:26.194530 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs podName:7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c nodeName:}" failed. No retries permitted until 2025-11-22 10:38:26.694516654 +0000 UTC m=+39.162354053 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs") pod "network-metrics-daemon-s7w5f" (UID: "7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.211477 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5qtx\" (UniqueName: \"kubernetes.io/projected/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-kube-api-access-m5qtx\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.290835 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.290902 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.290949 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.290974 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.290993 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:26Z","lastTransitionTime":"2025-11-22T10:38:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.393102 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.393161 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.393178 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.393202 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.393221 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:26Z","lastTransitionTime":"2025-11-22T10:38:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.446965 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:26 crc kubenswrapper[4938]: E1122 10:38:26.447162 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.496769 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.496815 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.496824 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.496838 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.496847 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:26Z","lastTransitionTime":"2025-11-22T10:38:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.599639 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.599689 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.599700 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.599719 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.599731 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:26Z","lastTransitionTime":"2025-11-22T10:38:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.699404 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:26 crc kubenswrapper[4938]: E1122 10:38:26.699550 4938 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:38:26 crc kubenswrapper[4938]: E1122 10:38:26.699635 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs podName:7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c nodeName:}" failed. No retries permitted until 2025-11-22 10:38:27.699617427 +0000 UTC m=+40.167454816 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs") pod "network-metrics-daemon-s7w5f" (UID: "7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.701654 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.701693 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.701704 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.701718 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.701728 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:26Z","lastTransitionTime":"2025-11-22T10:38:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.804511 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.804571 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.804589 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.804615 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.804633 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:26Z","lastTransitionTime":"2025-11-22T10:38:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.907834 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.907878 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.907891 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.907927 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:26 crc kubenswrapper[4938]: I1122 10:38:26.907941 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:26Z","lastTransitionTime":"2025-11-22T10:38:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.011367 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.011428 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.011446 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.011470 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.011486 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:27Z","lastTransitionTime":"2025-11-22T10:38:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.114197 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.114253 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.114269 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.114296 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.114319 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:27Z","lastTransitionTime":"2025-11-22T10:38:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.205792 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.206053 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:38:43.206016283 +0000 UTC m=+55.673853712 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.206229 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.206292 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.206432 4938 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.206454 4938 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.206519 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:43.206503405 +0000 UTC m=+55.674340844 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.206542 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:43.206530606 +0000 UTC m=+55.674368045 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.216863 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.217243 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.217300 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.217336 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.217365 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:27Z","lastTransitionTime":"2025-11-22T10:38:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.319606 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.319662 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.319679 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.319703 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.319719 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:27Z","lastTransitionTime":"2025-11-22T10:38:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.408176 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.408226 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.408382 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.408389 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.408446 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.408402 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.408468 4938 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.408474 4938 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.408536 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:43.40851961 +0000 UTC m=+55.876357019 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.408554 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:38:43.40854622 +0000 UTC m=+55.876383629 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.425953 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.426034 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.426072 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.426105 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.426130 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:27Z","lastTransitionTime":"2025-11-22T10:38:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.446522 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.446603 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.446680 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.446700 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.446847 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.446993 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.530503 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.530581 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.530604 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.530635 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.530656 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:27Z","lastTransitionTime":"2025-11-22T10:38:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.634669 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.634718 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.634729 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.634750 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.634762 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:27Z","lastTransitionTime":"2025-11-22T10:38:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.711586 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.712824 4938 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:38:27 crc kubenswrapper[4938]: E1122 10:38:27.713191 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs podName:7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c nodeName:}" failed. No retries permitted until 2025-11-22 10:38:29.713153997 +0000 UTC m=+42.180991426 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs") pod "network-metrics-daemon-s7w5f" (UID: "7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.725174 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" event={"ID":"db6a2428-0ce2-4754-8876-b95a9470a769","Type":"ContainerStarted","Data":"18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837"} Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.738291 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.738347 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.738364 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.738386 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.738405 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:27Z","lastTransitionTime":"2025-11-22T10:38:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.841664 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.841727 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.841746 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.841771 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.841789 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:27Z","lastTransitionTime":"2025-11-22T10:38:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.944449 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.944508 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.944522 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.944544 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:27 crc kubenswrapper[4938]: I1122 10:38:27.944559 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:27Z","lastTransitionTime":"2025-11-22T10:38:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.047101 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.047164 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.047181 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.047205 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.047230 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.149462 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.149510 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.149523 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.149542 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.149555 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.251708 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.251746 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.251759 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.251778 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.251791 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.354646 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.354690 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.354701 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.354719 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.354731 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.389624 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.389675 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.389691 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.389717 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.389734 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: E1122 10:38:28.404083 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.406745 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.406775 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.406785 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.406798 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.406808 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: E1122 10:38:28.416285 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.419764 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.419794 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.419802 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.419815 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.419824 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: E1122 10:38:28.429619 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.432681 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.432706 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.432715 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.432727 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.432735 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.446692 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:28 crc kubenswrapper[4938]: E1122 10:38:28.446805 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:28 crc kubenswrapper[4938]: E1122 10:38:28.449813 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.460579 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.460630 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.460646 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.460660 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.460669 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.462447 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] 
check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.477955 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: E1122 10:38:28.478821 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: E1122 10:38:28.479004 4938 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.481945 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.482004 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.482022 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.482048 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.482064 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.492554 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.508683 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.522603 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.534213 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.545041 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.559148 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.570050 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.580313 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.583764 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.583805 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.583816 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.583834 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.583845 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.595974 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.614257 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.633887 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.649623 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.659688 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.674960 4938 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.685791 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.685857 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.685882 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.685961 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.685993 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.733657 4938 generic.go:334] "Generic (PLEG): container finished" podID="a430bfdd-4d1d-4bda-82ec-884f775af556" containerID="584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012" exitCode=0 Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.733766 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" event={"ID":"a430bfdd-4d1d-4bda-82ec-884f775af556","Type":"ContainerDied","Data":"584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.737231 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" event={"ID":"db6a2428-0ce2-4754-8876-b95a9470a769","Type":"ContainerStarted","Data":"e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.775792 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.789030 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.789091 4938 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.789107 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.789128 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.789143 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.797547 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.830128 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.840096 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.849796 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.862765 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.874341 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.889711 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.891598 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.891622 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.891633 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.891647 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.891658 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.900843 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.909627 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.925232 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43e
e513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.935437 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 
10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.944868 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 
2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.956659 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.968800 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.980507 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.989670 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.993523 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:28 
crc kubenswrapper[4938]: I1122 10:38:28.993543 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.993550 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.993562 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:28 crc kubenswrapper[4938]: I1122 10:38:28.993571 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:28Z","lastTransitionTime":"2025-11-22T10:38:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.002227 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.017059 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.027459 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.037499 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.050184 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.060013 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.071084 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.081259 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 
10:38:29.089044 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.095505 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.095769 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.095831 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.095890 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.095973 4938 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:29Z","lastTransitionTime":"2025-11-22T10:38:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.103930 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\
":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 
10:38:29.112689 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.122036 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.133413 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.142798 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.153056 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.198880 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.198938 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.198949 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.198962 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.198971 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:29Z","lastTransitionTime":"2025-11-22T10:38:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.301177 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.301205 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.301213 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.301226 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.301235 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:29Z","lastTransitionTime":"2025-11-22T10:38:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.403406 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.403450 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.403462 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.403477 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.403489 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:29Z","lastTransitionTime":"2025-11-22T10:38:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.447074 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.447110 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:29 crc kubenswrapper[4938]: E1122 10:38:29.447232 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:29 crc kubenswrapper[4938]: E1122 10:38:29.447331 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.447110 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:29 crc kubenswrapper[4938]: E1122 10:38:29.447673 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.506451 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.506496 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.506509 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.506526 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.506537 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:29Z","lastTransitionTime":"2025-11-22T10:38:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.608965 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.609533 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.609710 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.609850 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.610055 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:29Z","lastTransitionTime":"2025-11-22T10:38:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.713688 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.713750 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.713768 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.713800 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.713823 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:29Z","lastTransitionTime":"2025-11-22T10:38:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.735963 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:29 crc kubenswrapper[4938]: E1122 10:38:29.736142 4938 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:38:29 crc kubenswrapper[4938]: E1122 10:38:29.736230 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs podName:7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c nodeName:}" failed. No retries permitted until 2025-11-22 10:38:33.736212177 +0000 UTC m=+46.204049576 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs") pod "network-metrics-daemon-s7w5f" (UID: "7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.817620 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.817713 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.817732 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.817766 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.817786 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:29Z","lastTransitionTime":"2025-11-22T10:38:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.920378 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.920438 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.920450 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.920474 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:29 crc kubenswrapper[4938]: I1122 10:38:29.920487 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:29Z","lastTransitionTime":"2025-11-22T10:38:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.024011 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.024073 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.024085 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.024103 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.024115 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:30Z","lastTransitionTime":"2025-11-22T10:38:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.126191 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.126238 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.126249 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.126264 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.126276 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:30Z","lastTransitionTime":"2025-11-22T10:38:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.228354 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.228399 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.228413 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.228431 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.228443 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:30Z","lastTransitionTime":"2025-11-22T10:38:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.331968 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.332038 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.332052 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.332080 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.332101 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:30Z","lastTransitionTime":"2025-11-22T10:38:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.436160 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.436409 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.436470 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.436543 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.436613 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:30Z","lastTransitionTime":"2025-11-22T10:38:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.446831 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:30 crc kubenswrapper[4938]: E1122 10:38:30.447021 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.539622 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.539680 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.539692 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.539710 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.539723 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:30Z","lastTransitionTime":"2025-11-22T10:38:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.644648 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.644700 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.644713 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.644732 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.644744 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:30Z","lastTransitionTime":"2025-11-22T10:38:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.747047 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.748219 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.748399 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.748512 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.748650 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:30Z","lastTransitionTime":"2025-11-22T10:38:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.749366 4938 generic.go:334] "Generic (PLEG): container finished" podID="a430bfdd-4d1d-4bda-82ec-884f775af556" containerID="d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba" exitCode=0 Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.749418 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" event={"ID":"a430bfdd-4d1d-4bda-82ec-884f775af556","Type":"ContainerDied","Data":"d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba"} Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.770884 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.787185 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.820761 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.832421 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.843314 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.850840 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.850891 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.850904 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.850938 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.850950 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:30Z","lastTransitionTime":"2025-11-22T10:38:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.856489 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220
d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.875468 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.892721 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.907615 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.928404 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.944010 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.956424 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.956483 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.956497 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.956520 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.956536 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:30Z","lastTransitionTime":"2025-11-22T10:38:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.958080 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.970892 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:30 crc kubenswrapper[4938]: I1122 10:38:30.987199 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator
@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.003386 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.016081 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.058551 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.058594 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.058605 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.058619 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.058629 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:31Z","lastTransitionTime":"2025-11-22T10:38:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.161572 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.161600 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.161609 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.161623 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.161631 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:31Z","lastTransitionTime":"2025-11-22T10:38:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.265604 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.265686 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.265706 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.265739 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.265760 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:31Z","lastTransitionTime":"2025-11-22T10:38:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.369405 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.369494 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.369517 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.369545 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.369564 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:31Z","lastTransitionTime":"2025-11-22T10:38:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.446904 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.446984 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.446904 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:31 crc kubenswrapper[4938]: E1122 10:38:31.447127 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:31 crc kubenswrapper[4938]: E1122 10:38:31.447349 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:31 crc kubenswrapper[4938]: E1122 10:38:31.447607 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.472635 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.472693 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.472707 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.472726 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.472741 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:31Z","lastTransitionTime":"2025-11-22T10:38:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.576346 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.576410 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.576424 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.576447 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.576460 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:31Z","lastTransitionTime":"2025-11-22T10:38:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.679440 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.679495 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.679507 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.679523 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.679534 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:31Z","lastTransitionTime":"2025-11-22T10:38:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.782263 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.782339 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.782362 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.782394 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.782417 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:31Z","lastTransitionTime":"2025-11-22T10:38:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.885336 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.885376 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.885388 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.885405 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.885416 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:31Z","lastTransitionTime":"2025-11-22T10:38:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.988752 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.988802 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.988814 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.988831 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:31 crc kubenswrapper[4938]: I1122 10:38:31.988843 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:31Z","lastTransitionTime":"2025-11-22T10:38:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.092179 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.092231 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.092243 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.092261 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.092272 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:32Z","lastTransitionTime":"2025-11-22T10:38:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.195097 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.195181 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.195208 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.195240 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.195264 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:32Z","lastTransitionTime":"2025-11-22T10:38:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.297565 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.297609 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.297617 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.297631 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.297640 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:32Z","lastTransitionTime":"2025-11-22T10:38:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.399663 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.399696 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.399706 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.399720 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.399729 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:32Z","lastTransitionTime":"2025-11-22T10:38:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.446986 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:32 crc kubenswrapper[4938]: E1122 10:38:32.447317 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.447523 4938 scope.go:117] "RemoveContainer" containerID="d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.502070 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.502151 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.502165 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.502182 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.502194 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:32Z","lastTransitionTime":"2025-11-22T10:38:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.605143 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.605190 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.605201 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.605215 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.605224 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:32Z","lastTransitionTime":"2025-11-22T10:38:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.707675 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.707711 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.707722 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.707741 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.707752 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:32Z","lastTransitionTime":"2025-11-22T10:38:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.758983 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" event={"ID":"a430bfdd-4d1d-4bda-82ec-884f775af556","Type":"ContainerStarted","Data":"bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3"} Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.780356 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.796925 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/
run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\"
,\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.809517 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.809555 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.809564 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.809579 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.809588 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:32Z","lastTransitionTime":"2025-11-22T10:38:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.817082 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-
dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.831747 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.846807 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.872123 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] 
check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.889260 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.908022 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.917081 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.917137 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.917147 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.917169 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.917182 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:32Z","lastTransitionTime":"2025-11-22T10:38:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.924703 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.934554 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.951439 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43e
e513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.963442 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 
10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.976086 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:32 crc kubenswrapper[4938]: I1122 10:38:32.989982 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedA
t\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:32Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.002153 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:33Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.014651 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:33Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.019575 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.019684 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.019745 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.019808 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.019863 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:33Z","lastTransitionTime":"2025-11-22T10:38:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.122824 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.122892 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.122933 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.122959 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.122978 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:33Z","lastTransitionTime":"2025-11-22T10:38:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.225936 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.225985 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.226002 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.226024 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.226041 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:33Z","lastTransitionTime":"2025-11-22T10:38:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.329169 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.329217 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.329227 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.329245 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.329255 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:33Z","lastTransitionTime":"2025-11-22T10:38:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.431552 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.431588 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.431597 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.431612 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.431625 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:33Z","lastTransitionTime":"2025-11-22T10:38:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.447190 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.447217 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:33 crc kubenswrapper[4938]: E1122 10:38:33.447411 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.447247 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:33 crc kubenswrapper[4938]: E1122 10:38:33.447538 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:33 crc kubenswrapper[4938]: E1122 10:38:33.447618 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.533945 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.534217 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.534284 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.534346 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.534406 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:33Z","lastTransitionTime":"2025-11-22T10:38:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.637018 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.637057 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.637067 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.637082 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.637095 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:33Z","lastTransitionTime":"2025-11-22T10:38:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.738982 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.739028 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.739041 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.739058 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.739072 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:33Z","lastTransitionTime":"2025-11-22T10:38:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.778561 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:33 crc kubenswrapper[4938]: E1122 10:38:33.778731 4938 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:38:33 crc kubenswrapper[4938]: E1122 10:38:33.778789 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs podName:7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c nodeName:}" failed. No retries permitted until 2025-11-22 10:38:41.778773426 +0000 UTC m=+54.246610825 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs") pod "network-metrics-daemon-s7w5f" (UID: "7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.841236 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.841304 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.841323 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.841349 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.841366 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:33Z","lastTransitionTime":"2025-11-22T10:38:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.943320 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.943354 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.943365 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.943381 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:33 crc kubenswrapper[4938]: I1122 10:38:33.943394 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:33Z","lastTransitionTime":"2025-11-22T10:38:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.045604 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.045644 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.045652 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.045667 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.045676 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:34Z","lastTransitionTime":"2025-11-22T10:38:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.148136 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.148172 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.148181 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.148195 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.148206 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:34Z","lastTransitionTime":"2025-11-22T10:38:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.251131 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.251183 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.251199 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.251222 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.251238 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:34Z","lastTransitionTime":"2025-11-22T10:38:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.353859 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.353901 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.353921 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.353936 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.353945 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:34Z","lastTransitionTime":"2025-11-22T10:38:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.447080 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:34 crc kubenswrapper[4938]: E1122 10:38:34.447299 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.461380 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.461672 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.461863 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.462037 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.462185 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:34Z","lastTransitionTime":"2025-11-22T10:38:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.563946 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.563970 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.563978 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.563991 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.563999 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:34Z","lastTransitionTime":"2025-11-22T10:38:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.666296 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.666599 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.666706 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.666855 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.667018 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:34Z","lastTransitionTime":"2025-11-22T10:38:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.768742 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.769582 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.769617 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.769638 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.769654 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.769667 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:34Z","lastTransitionTime":"2025-11-22T10:38:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.771507 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb"} Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.772433 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.787104 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.799562 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.816256 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.825863 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.842537 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath
\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.852309 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.863726 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.872511 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.872553 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.872567 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.872584 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.872597 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:34Z","lastTransitionTime":"2025-11-22T10:38:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.876177 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.889347 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] 
check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.900107 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.910946 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.923449 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\
\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.934668 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.945212 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.957001 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.970591 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.975165 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.975198 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.975212 4938 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.975228 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:34 crc kubenswrapper[4938]: I1122 10:38:34.975239 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:34Z","lastTransitionTime":"2025-11-22T10:38:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.078081 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.078111 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.078122 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.078138 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.078149 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:35Z","lastTransitionTime":"2025-11-22T10:38:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.180960 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.181036 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.181048 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.181063 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.181074 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:35Z","lastTransitionTime":"2025-11-22T10:38:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.283100 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.283362 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.283478 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.283634 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.283736 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:35Z","lastTransitionTime":"2025-11-22T10:38:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.385941 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.385982 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.385993 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.386010 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.386023 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:35Z","lastTransitionTime":"2025-11-22T10:38:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.446787 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.446866 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.446899 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:35 crc kubenswrapper[4938]: E1122 10:38:35.447035 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:35 crc kubenswrapper[4938]: E1122 10:38:35.447255 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:35 crc kubenswrapper[4938]: E1122 10:38:35.447561 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.488722 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.488778 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.488793 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.488816 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.488842 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:35Z","lastTransitionTime":"2025-11-22T10:38:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.591076 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.591122 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.591135 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.591156 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.591171 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:35Z","lastTransitionTime":"2025-11-22T10:38:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.694336 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.694385 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.694395 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.694411 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.694422 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:35Z","lastTransitionTime":"2025-11-22T10:38:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.777207 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/0.log" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.779969 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerID="46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759" exitCode=1 Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.780259 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759"} Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.781823 4938 scope.go:117] "RemoveContainer" containerID="46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.796415 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.796442 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.796451 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.796465 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.796474 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:35Z","lastTransitionTime":"2025-11-22T10:38:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.800084 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.812435 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.833532 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:35Z\\\",\\\"message\\\":\\\"974 6135 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 10:38:34.968054 6135 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 10:38:34.968107 6135 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 10:38:34.968285 6135 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:38:34.969847 6135 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:38:34.969874 6135 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 10:38:34.969878 6135 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 10:38:34.969897 6135 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:38:34.969924 6135 factory.go:656] Stopping watch factory\\\\nI1122 10:38:34.969942 6135 ovnkube.go:599] Stopped ovnkube\\\\nI1122 10:38:34.969962 6135 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:38:34.969972 6135 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 10:38:34.969979 6135 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 
10:38:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.848032 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.859100 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secre
ts/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.870770 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.883129 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.896387 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.899098 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.899126 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.899137 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.899154 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.899166 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:35Z","lastTransitionTime":"2025-11-22T10:38:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.908139 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.924070 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.937484 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.944887 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.953009 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.964184 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.975494 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:35 crc kubenswrapper[4938]: I1122 10:38:35.986498 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.000981 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.001100 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.001173 4938 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.001263 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.001318 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:36Z","lastTransitionTime":"2025-11-22T10:38:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.103001 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.103031 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.103039 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.103053 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.103065 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:36Z","lastTransitionTime":"2025-11-22T10:38:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.204826 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.204866 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.204878 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.204894 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.204906 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:36Z","lastTransitionTime":"2025-11-22T10:38:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.307269 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.307305 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.307316 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.307332 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.307343 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:36Z","lastTransitionTime":"2025-11-22T10:38:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.410442 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.410499 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.410512 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.410528 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.410541 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:36Z","lastTransitionTime":"2025-11-22T10:38:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.446842 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:36 crc kubenswrapper[4938]: E1122 10:38:36.446999 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.512156 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.512190 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.512199 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.512214 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.512223 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:36Z","lastTransitionTime":"2025-11-22T10:38:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.614272 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.614322 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.614335 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.614355 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.614369 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:36Z","lastTransitionTime":"2025-11-22T10:38:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.717443 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.717470 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.717479 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.717493 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.717503 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:36Z","lastTransitionTime":"2025-11-22T10:38:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.786518 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/0.log" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.789555 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72"} Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.789931 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.802813 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.814584 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.819064 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.819120 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.819134 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.819152 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.819164 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:36Z","lastTransitionTime":"2025-11-22T10:38:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.825729 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.837489 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.846226 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.862990 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceb
a7ada4ca0e05ba6bab15df72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:35Z\\\",\\\"message\\\":\\\"974 6135 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 10:38:34.968054 6135 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 10:38:34.968107 6135 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 10:38:34.968285 6135 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:38:34.969847 6135 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:38:34.969874 6135 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 10:38:34.969878 6135 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 10:38:34.969897 6135 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:38:34.969924 6135 factory.go:656] Stopping watch factory\\\\nI1122 10:38:34.969942 6135 ovnkube.go:599] Stopped ovnkube\\\\nI1122 10:38:34.969962 6135 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:38:34.969972 6135 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 10:38:34.969979 6135 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 
10:38:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.875070 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.886999 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.901309 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.915661 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] 
check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.921574 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.921621 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.921633 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.921651 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.921663 4938 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:36Z","lastTransitionTime":"2025-11-22T10:38:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.930869 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.945545 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.960014 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.971128 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.982233 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:36 crc kubenswrapper[4938]: I1122 10:38:36.990745 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.023338 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.023376 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.023386 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.023399 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.023409 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:37Z","lastTransitionTime":"2025-11-22T10:38:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.125664 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.125717 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.125738 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.125759 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.125770 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:37Z","lastTransitionTime":"2025-11-22T10:38:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.228381 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.228421 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.228431 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.228444 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.228453 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:37Z","lastTransitionTime":"2025-11-22T10:38:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.331645 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.331677 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.331686 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.331698 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.331707 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:37Z","lastTransitionTime":"2025-11-22T10:38:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.434889 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.434946 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.434958 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.434972 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.434984 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:37Z","lastTransitionTime":"2025-11-22T10:38:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.447240 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.447316 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.447275 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:37 crc kubenswrapper[4938]: E1122 10:38:37.447395 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:37 crc kubenswrapper[4938]: E1122 10:38:37.447512 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:37 crc kubenswrapper[4938]: E1122 10:38:37.447597 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.537582 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.537620 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.537628 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.537642 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.537652 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:37Z","lastTransitionTime":"2025-11-22T10:38:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.640103 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.640170 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.640192 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.640221 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.640242 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:37Z","lastTransitionTime":"2025-11-22T10:38:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.743281 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.743480 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.743489 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.743503 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.743511 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:37Z","lastTransitionTime":"2025-11-22T10:38:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.798631 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/1.log" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.799234 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/0.log" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.802227 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerID="6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72" exitCode=1 Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.802277 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72"} Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.802314 4938 scope.go:117] "RemoveContainer" containerID="46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.802849 4938 scope.go:117] "RemoveContainer" containerID="6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72" Nov 22 10:38:37 crc kubenswrapper[4938]: E1122 10:38:37.803005 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.815798 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z" Nov 22 
10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.832229 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.844934 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z"
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.846585 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.846697 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.846810 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.846894 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.846993 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:37Z","lastTransitionTime":"2025-11-22T10:38:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.854802 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z"
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.862697 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.885946 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:35Z\\\",\\\"message\\\":\\\"974 6135 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 10:38:34.968054 6135 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 10:38:34.968107 6135 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 10:38:34.968285 6135 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:38:34.969847 6135 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:38:34.969874 6135 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 10:38:34.969878 6135 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 10:38:34.969897 6135 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:38:34.969924 6135 factory.go:656] Stopping watch factory\\\\nI1122 10:38:34.969942 6135 ovnkube.go:599] Stopped ovnkube\\\\nI1122 10:38:34.969962 6135 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:38:34.969972 6135 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 10:38:34.969979 6135 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 10:38:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:37Z\\\",\\\"message\\\":\\\"ode-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:38:36.709690 6473 lb_config.go:1031] Cluster endpoints for openshift-authentication/oauth-openshift for network=default are: map[]\\\\nI1122 10:38:36.709645 6473 services_controller.go:434] Service openshift-machine-config-operator/machine-config-controller retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{machine-config-controller openshift-machine-config-operator aa30290d-3a39-43ba-a212-6439bd680987 4486 0 2025-02-23 05:12:25 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[k8s-app:machine-config-controller] map[include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true 
include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:mcc-proxy-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0079c62db \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Na\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath
\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.907279 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.922313 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.931495 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.941781 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z"
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.949226 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.949330 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.949387 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.949446 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.949500 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:37Z","lastTransitionTime":"2025-11-22T10:38:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.952054 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.961453 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.973361 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.983673 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:37 crc kubenswrapper[4938]: I1122 10:38:37.995933 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:37Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.009302 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.051670 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.051730 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.051745 4938 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.051768 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.051784 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.154266 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.154331 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.154347 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.154370 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.154386 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.257113 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.257147 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.257155 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.257169 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.257180 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.359840 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.359885 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.359981 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.359998 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.360007 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.447485 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:38 crc kubenswrapper[4938]: E1122 10:38:38.447600 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.461584 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.462258 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.462287 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.462299 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.462312 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.462322 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.479935 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.499521 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",
\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46323283b336e7cbadd1e807922861e74c74449411071f3c3d7740d86b4ed759\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:35Z\\\",\\\"message\\\":\\\"974 6135 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 10:38:34.968054 6135 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 10:38:34.968107 6135 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 10:38:34.968285 6135 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:38:34.969847 6135 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:38:34.969874 6135 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 10:38:34.969878 6135 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 10:38:34.969897 6135 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:38:34.969924 6135 factory.go:656] Stopping watch factory\\\\nI1122 10:38:34.969942 6135 ovnkube.go:599] Stopped ovnkube\\\\nI1122 10:38:34.969962 6135 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:38:34.969972 6135 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 10:38:34.969979 6135 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 10:38:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:37Z\\\",\\\"message\\\":\\\"ode-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:38:36.709690 6473 lb_config.go:1031] Cluster endpoints for openshift-authentication/oauth-openshift for network=default are: map[]\\\\nI1122 10:38:36.709645 6473 services_controller.go:434] Service openshift-machine-config-operator/machine-config-controller retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{machine-config-controller openshift-machine-config-operator aa30290d-3a39-43ba-a212-6439bd680987 4486 0 2025-02-23 05:12:25 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[k8s-app:machine-config-controller] map[include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:mcc-proxy-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0079c62db \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Na\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.512939 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.522195 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.535733 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.549236 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.563053 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.564501 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.564569 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.564593 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.564622 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.564641 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.578010 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.583476 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.583745 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.583991 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.584171 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.584326 4938 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.596213 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"co
ntainerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: E1122 10:38:38.598085 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 
2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.601461 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.601503 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.601512 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.601526 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.601535 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.611256 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\
\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: E1122 10:38:38.612564 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 
2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.616346 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.616377 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.616387 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.616401 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.616413 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.623829 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:
14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: E1122 10:38:38.627904 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 
2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.630485 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.630511 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.630553 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.630570 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.630582 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.632318 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: E1122 10:38:38.642642 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.645028 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.645371 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.645403 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.645412 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.645427 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.645436 4938 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: E1122 10:38:38.657799 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: E1122 10:38:38.657953 4938 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.658436 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.667019 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.667062 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.667072 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.667090 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.667103 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.670986 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.769319 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.769361 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.769371 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.769386 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.769395 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.805723 4938 scope.go:117] "RemoveContainer" containerID="6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72" Nov 22 10:38:38 crc kubenswrapper[4938]: E1122 10:38:38.805874 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.819995 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.841699 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.853574 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.863762 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.871299 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.871346 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.871359 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.871377 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.871389 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.878766 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.890621 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.908590 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:37Z\\\",\\\"message\\\":\\\"ode-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:38:36.709690 6473 lb_config.go:1031] Cluster endpoints for openshift-authentication/oauth-openshift for network=default are: map[]\\\\nI1122 10:38:36.709645 6473 services_controller.go:434] Service openshift-machine-config-operator/machine-config-controller retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{machine-config-controller openshift-machine-config-operator aa30290d-3a39-43ba-a212-6439bd680987 4486 0 2025-02-23 05:12:25 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[k8s-app:machine-config-controller] map[include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:mcc-proxy-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0079c62db \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Na\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.922381 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.932054 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 
10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.948491 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.959597 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.971627 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.973638 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.973672 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.973683 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.973697 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.973707 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:38Z","lastTransitionTime":"2025-11-22T10:38:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.986001 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finish
edAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e5
4319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:38 crc kubenswrapper[4938]: I1122 10:38:38.997477 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\
\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.005586 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.013308 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.075991 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.076053 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.076069 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.076088 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.076101 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:39Z","lastTransitionTime":"2025-11-22T10:38:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.178103 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.178139 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.178150 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.178166 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.178178 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:39Z","lastTransitionTime":"2025-11-22T10:38:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.280394 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.280426 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.280434 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.280448 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.280458 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:39Z","lastTransitionTime":"2025-11-22T10:38:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.382835 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.382880 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.382892 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.382929 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.382944 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:39Z","lastTransitionTime":"2025-11-22T10:38:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.446555 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.446554 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:39 crc kubenswrapper[4938]: E1122 10:38:39.446688 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:39 crc kubenswrapper[4938]: E1122 10:38:39.446745 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.446586 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:39 crc kubenswrapper[4938]: E1122 10:38:39.446823 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.485203 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.485247 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.485263 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.485279 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.485288 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:39Z","lastTransitionTime":"2025-11-22T10:38:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.587487 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.587536 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.587547 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.587566 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.587579 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:39Z","lastTransitionTime":"2025-11-22T10:38:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.689306 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.689350 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.689365 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.689379 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.689390 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:39Z","lastTransitionTime":"2025-11-22T10:38:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.767971 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.775759 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.789041 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.791830 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.791879 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.791895 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.791954 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.791965 4938 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:39Z","lastTransitionTime":"2025-11-22T10:38:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.804248 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.809484 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/1.log" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.818543 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.827715 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.843782 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:37Z\\\",\\\"message\\\":\\\"ode-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:38:36.709690 6473 lb_config.go:1031] Cluster endpoints for openshift-authentication/oauth-openshift for network=default are: map[]\\\\nI1122 10:38:36.709645 6473 services_controller.go:434] Service openshift-machine-config-operator/machine-config-controller retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{machine-config-controller openshift-machine-config-operator aa30290d-3a39-43ba-a212-6439bd680987 4486 0 2025-02-23 05:12:25 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[k8s-app:machine-config-controller] map[include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:mcc-proxy-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0079c62db \\\\u003cnil\\\\u003e}] [] 
[]},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Na\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-
access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.852864 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.865926 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.876962 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.887033 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.893690 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.894266 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.894353 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.894461 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.894544 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:39Z","lastTransitionTime":"2025-11-22T10:38:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.902257 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.915057 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.931476 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.943805 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.955295 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.968828 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.988142 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:39Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.998033 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.998080 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.998091 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.998109 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:39 crc kubenswrapper[4938]: I1122 10:38:39.998122 4938 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:39Z","lastTransitionTime":"2025-11-22T10:38:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.100528 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.100569 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.100580 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.100597 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.100609 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:40Z","lastTransitionTime":"2025-11-22T10:38:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.203245 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.203294 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.203304 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.203320 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.203330 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:40Z","lastTransitionTime":"2025-11-22T10:38:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.305963 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.306009 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.306017 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.306032 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.306040 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:40Z","lastTransitionTime":"2025-11-22T10:38:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.408248 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.408323 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.408346 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.408375 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.408397 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:40Z","lastTransitionTime":"2025-11-22T10:38:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.447198 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:40 crc kubenswrapper[4938]: E1122 10:38:40.447334 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.512903 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.512965 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.512978 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.512995 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.513007 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:40Z","lastTransitionTime":"2025-11-22T10:38:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.615466 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.615521 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.615534 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.615552 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.615562 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:40Z","lastTransitionTime":"2025-11-22T10:38:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.718636 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.718682 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.718699 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.718719 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.718736 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:40Z","lastTransitionTime":"2025-11-22T10:38:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.821645 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.821685 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.821694 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.821732 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.821744 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:40Z","lastTransitionTime":"2025-11-22T10:38:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.923983 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.924050 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.924069 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.924101 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:40 crc kubenswrapper[4938]: I1122 10:38:40.924110 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:40Z","lastTransitionTime":"2025-11-22T10:38:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.026298 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.026523 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.026585 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.026646 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.026730 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:41Z","lastTransitionTime":"2025-11-22T10:38:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.128611 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.128650 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.128661 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.128675 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.128685 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:41Z","lastTransitionTime":"2025-11-22T10:38:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.231391 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.231456 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.231467 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.231489 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.231504 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:41Z","lastTransitionTime":"2025-11-22T10:38:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.333870 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.333933 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.333944 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.333957 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.333967 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:41Z","lastTransitionTime":"2025-11-22T10:38:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.436352 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.436389 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.436400 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.436416 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.436427 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:41Z","lastTransitionTime":"2025-11-22T10:38:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.446777 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.446809 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:41 crc kubenswrapper[4938]: E1122 10:38:41.446928 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.446981 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:41 crc kubenswrapper[4938]: E1122 10:38:41.447129 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:41 crc kubenswrapper[4938]: E1122 10:38:41.447198 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.539179 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.539222 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.539230 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.539243 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.539251 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:41Z","lastTransitionTime":"2025-11-22T10:38:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.641543 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.641591 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.641602 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.641617 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.641633 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:41Z","lastTransitionTime":"2025-11-22T10:38:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.743279 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.743327 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.743339 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.743356 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.743365 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:41Z","lastTransitionTime":"2025-11-22T10:38:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.804070 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:41 crc kubenswrapper[4938]: E1122 10:38:41.804270 4938 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:38:41 crc kubenswrapper[4938]: E1122 10:38:41.804358 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs podName:7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c nodeName:}" failed. No retries permitted until 2025-11-22 10:38:57.804334673 +0000 UTC m=+70.272172072 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs") pod "network-metrics-daemon-s7w5f" (UID: "7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.846004 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.846053 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.846067 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.846084 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.846097 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:41Z","lastTransitionTime":"2025-11-22T10:38:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.948775 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.948818 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.948826 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.948840 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:41 crc kubenswrapper[4938]: I1122 10:38:41.948851 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:41Z","lastTransitionTime":"2025-11-22T10:38:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.050732 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.050760 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.050768 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.050781 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.050792 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:42Z","lastTransitionTime":"2025-11-22T10:38:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.153708 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.153748 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.153757 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.153789 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.153800 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:42Z","lastTransitionTime":"2025-11-22T10:38:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.256775 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.256837 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.256848 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.256866 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.256878 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:42Z","lastTransitionTime":"2025-11-22T10:38:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.359149 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.359185 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.359195 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.359211 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.359222 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:42Z","lastTransitionTime":"2025-11-22T10:38:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.447104 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:42 crc kubenswrapper[4938]: E1122 10:38:42.447464 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.461383 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.461438 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.461452 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.461475 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.461489 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:42Z","lastTransitionTime":"2025-11-22T10:38:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.564014 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.564457 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.564556 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.564652 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.564772 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:42Z","lastTransitionTime":"2025-11-22T10:38:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.668799 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.668851 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.668863 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.668879 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.668892 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:42Z","lastTransitionTime":"2025-11-22T10:38:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.771769 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.771809 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.771820 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.771835 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.771848 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:42Z","lastTransitionTime":"2025-11-22T10:38:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.874593 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.874852 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.874943 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.875033 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.875118 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:42Z","lastTransitionTime":"2025-11-22T10:38:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.978684 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.979631 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.979833 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.980098 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:42 crc kubenswrapper[4938]: I1122 10:38:42.980356 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:42Z","lastTransitionTime":"2025-11-22T10:38:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.083638 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.083725 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.083749 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.083781 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.083807 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:43Z","lastTransitionTime":"2025-11-22T10:38:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.186614 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.186693 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.186704 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.186719 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.186728 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:43Z","lastTransitionTime":"2025-11-22T10:38:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.218154 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.218238 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.218254 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:39:15.218234131 +0000 UTC m=+87.686071520 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.218300 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.218346 4938 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.218384 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:39:15.218375514 +0000 UTC m=+87.686212913 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.218389 4938 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.218413 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:39:15.218407015 +0000 UTC m=+87.686244414 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.289401 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.289437 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.289447 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.289536 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.289552 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:43Z","lastTransitionTime":"2025-11-22T10:38:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.392290 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.392341 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.392349 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.392364 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.392375 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:43Z","lastTransitionTime":"2025-11-22T10:38:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.420403 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.420605 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.420641 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.420679 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.420692 4938 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.420760 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:39:15.420740628 +0000 UTC m=+87.888578027 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.420861 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.420948 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.420968 4938 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.421079 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:39:15.421057426 +0000 UTC m=+87.888894865 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.447233 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.447294 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.447298 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.447382 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.447521 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:43 crc kubenswrapper[4938]: E1122 10:38:43.447650 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.494540 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.494582 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.494595 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.494638 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.494652 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:43Z","lastTransitionTime":"2025-11-22T10:38:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.597451 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.597512 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.597520 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.597556 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.597568 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:43Z","lastTransitionTime":"2025-11-22T10:38:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.700388 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.700433 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.700441 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.700470 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.700480 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:43Z","lastTransitionTime":"2025-11-22T10:38:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.802885 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.802930 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.802941 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.802956 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.802966 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:43Z","lastTransitionTime":"2025-11-22T10:38:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.905450 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.905487 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.905497 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.905512 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:43 crc kubenswrapper[4938]: I1122 10:38:43.905522 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:43Z","lastTransitionTime":"2025-11-22T10:38:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.008201 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.008244 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.008259 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.008280 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.008297 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:44Z","lastTransitionTime":"2025-11-22T10:38:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.110782 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.110840 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.110854 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.110871 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.110884 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:44Z","lastTransitionTime":"2025-11-22T10:38:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.213021 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.213303 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.213372 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.213442 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.213504 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:44Z","lastTransitionTime":"2025-11-22T10:38:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.315412 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.315452 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.315461 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.315477 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.315492 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:44Z","lastTransitionTime":"2025-11-22T10:38:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.418189 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.418231 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.418240 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.418256 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.418271 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:44Z","lastTransitionTime":"2025-11-22T10:38:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.448048 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:44 crc kubenswrapper[4938]: E1122 10:38:44.448217 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.520807 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.520839 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.520849 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.520862 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.520871 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:44Z","lastTransitionTime":"2025-11-22T10:38:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.623678 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.623730 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.623739 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.623755 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.623767 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:44Z","lastTransitionTime":"2025-11-22T10:38:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.726231 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.726275 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.726286 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.726305 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.726318 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:44Z","lastTransitionTime":"2025-11-22T10:38:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.828395 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.828441 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.828450 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.828464 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.828474 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:44Z","lastTransitionTime":"2025-11-22T10:38:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.931797 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.931870 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.931891 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.931955 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:44 crc kubenswrapper[4938]: I1122 10:38:44.931980 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:44Z","lastTransitionTime":"2025-11-22T10:38:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.035426 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.036042 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.036138 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.036230 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.036314 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:45Z","lastTransitionTime":"2025-11-22T10:38:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.139318 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.139377 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.139394 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.139417 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.139434 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:45Z","lastTransitionTime":"2025-11-22T10:38:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.242257 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.242294 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.242306 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.242323 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.242335 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:45Z","lastTransitionTime":"2025-11-22T10:38:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.345652 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.345715 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.345735 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.345763 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.345784 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:45Z","lastTransitionTime":"2025-11-22T10:38:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.447356 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.447401 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:45 crc kubenswrapper[4938]: E1122 10:38:45.447563 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.447748 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:45 crc kubenswrapper[4938]: E1122 10:38:45.447905 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:45 crc kubenswrapper[4938]: E1122 10:38:45.448138 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.449197 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.449267 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.449292 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.449857 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.450100 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:45Z","lastTransitionTime":"2025-11-22T10:38:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.553122 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.553222 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.553261 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.553293 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.553332 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:45Z","lastTransitionTime":"2025-11-22T10:38:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.656527 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.656590 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.656609 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.656634 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.656653 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:45Z","lastTransitionTime":"2025-11-22T10:38:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.761285 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.761379 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.761406 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.761445 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.761485 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:45Z","lastTransitionTime":"2025-11-22T10:38:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.864648 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.864697 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.864706 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.864728 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.864740 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:45Z","lastTransitionTime":"2025-11-22T10:38:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.967725 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.968047 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.968233 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.968335 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:45 crc kubenswrapper[4938]: I1122 10:38:45.968424 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:45Z","lastTransitionTime":"2025-11-22T10:38:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.071880 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.072708 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.072803 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.072939 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.073036 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:46Z","lastTransitionTime":"2025-11-22T10:38:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.175721 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.175794 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.175812 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.175840 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.175862 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:46Z","lastTransitionTime":"2025-11-22T10:38:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.279683 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.279798 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.279823 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.279846 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.279860 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:46Z","lastTransitionTime":"2025-11-22T10:38:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.382039 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.382081 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.382090 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.382106 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.382117 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:46Z","lastTransitionTime":"2025-11-22T10:38:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.447251 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:46 crc kubenswrapper[4938]: E1122 10:38:46.447456 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.484171 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.484439 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.484522 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.484624 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.484709 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:46Z","lastTransitionTime":"2025-11-22T10:38:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.586707 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.586734 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.586742 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.586754 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.586762 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:46Z","lastTransitionTime":"2025-11-22T10:38:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.689236 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.689277 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.689287 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.689302 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.689312 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:46Z","lastTransitionTime":"2025-11-22T10:38:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.791765 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.791810 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.791826 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.791844 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.791858 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:46Z","lastTransitionTime":"2025-11-22T10:38:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.894857 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.894897 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.894927 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.894948 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.894958 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:46Z","lastTransitionTime":"2025-11-22T10:38:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.998406 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.998462 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.998477 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.998501 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:46 crc kubenswrapper[4938]: I1122 10:38:46.998516 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:46Z","lastTransitionTime":"2025-11-22T10:38:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.101736 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.102393 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.102518 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.102625 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.102728 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:47Z","lastTransitionTime":"2025-11-22T10:38:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.205967 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.206046 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.206069 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.206099 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.206117 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:47Z","lastTransitionTime":"2025-11-22T10:38:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.308773 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.308848 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.308866 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.308891 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.308946 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:47Z","lastTransitionTime":"2025-11-22T10:38:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.411774 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.411877 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.411896 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.411962 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.411982 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:47Z","lastTransitionTime":"2025-11-22T10:38:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.447159 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.447241 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.447270 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:47 crc kubenswrapper[4938]: E1122 10:38:47.447773 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:47 crc kubenswrapper[4938]: E1122 10:38:47.447943 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:47 crc kubenswrapper[4938]: E1122 10:38:47.448199 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.515310 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.515368 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.515387 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.515414 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.515431 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:47Z","lastTransitionTime":"2025-11-22T10:38:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.618882 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.619000 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.619024 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.619057 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.619077 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:47Z","lastTransitionTime":"2025-11-22T10:38:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.721515 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.721566 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.721616 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.721636 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.721649 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:47Z","lastTransitionTime":"2025-11-22T10:38:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.824987 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.825364 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.825495 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.825645 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.825786 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:47Z","lastTransitionTime":"2025-11-22T10:38:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.928977 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.929044 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.929062 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.929086 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:47 crc kubenswrapper[4938]: I1122 10:38:47.929104 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:47Z","lastTransitionTime":"2025-11-22T10:38:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.032420 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.032474 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.032491 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.032515 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.032534 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.135819 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.135887 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.135953 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.135986 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.136010 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.238462 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.238515 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.238529 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.238549 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.238565 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.341281 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.341317 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.341325 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.341340 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.341349 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.443675 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.443713 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.443721 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.443735 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.443744 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.446386 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:48 crc kubenswrapper[4938]: E1122 10:38:48.446562 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.463698 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.478057 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.488940 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.498995 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.513437 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.522780 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.540288 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:37Z\\\",\\\"message\\\":\\\"ode-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:38:36.709690 6473 lb_config.go:1031] Cluster endpoints for openshift-authentication/oauth-openshift for network=default are: map[]\\\\nI1122 10:38:36.709645 6473 services_controller.go:434] Service openshift-machine-config-operator/machine-config-controller retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{machine-config-controller openshift-machine-config-operator aa30290d-3a39-43ba-a212-6439bd680987 4486 0 2025-02-23 05:12:25 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[k8s-app:machine-config-controller] map[include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:mcc-proxy-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0079c62db \\\\u003cnil\\\\u003e}] [] 
[]},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Na\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-
access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.546392 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.546439 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.546470 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.546484 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.546492 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.553561 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.564778 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:
25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.576799 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-sche
duler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.588860 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] 
check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.604283 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.614231 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.629327 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\
\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.640895 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.648655 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.648693 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.648706 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.648721 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.648734 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.651530 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.660496 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.751242 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.751314 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.751327 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.751377 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.751392 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.853351 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.853673 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.853784 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.853888 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.854024 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.943768 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.943813 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.943824 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.943841 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.943852 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: E1122 10:38:48.955190 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 
2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.960602 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.960642 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.960654 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.960672 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.960683 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: E1122 10:38:48.973261 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 
2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.977176 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.977237 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.977247 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.977290 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.977303 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:48 crc kubenswrapper[4938]: E1122 10:38:48.990846 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:48Z is after 
2025-08-24T17:21:41Z" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.994326 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.994354 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.994363 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.994378 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:48 crc kubenswrapper[4938]: I1122 10:38:48.994388 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:48Z","lastTransitionTime":"2025-11-22T10:38:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:49 crc kubenswrapper[4938]: E1122 10:38:49.006565 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:49Z is after 
2025-08-24T17:21:41Z" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.010388 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.010527 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.010618 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.010741 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.010824 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:49Z","lastTransitionTime":"2025-11-22T10:38:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:49 crc kubenswrapper[4938]: E1122 10:38:49.023947 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:49Z is after 
2025-08-24T17:21:41Z" Nov 22 10:38:49 crc kubenswrapper[4938]: E1122 10:38:49.024101 4938 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.025731 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.025878 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.026038 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.026169 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.026300 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:49Z","lastTransitionTime":"2025-11-22T10:38:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.129525 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.129998 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.130209 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.130371 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.130524 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:49Z","lastTransitionTime":"2025-11-22T10:38:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.232624 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.232661 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.232674 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.232690 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.232699 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:49Z","lastTransitionTime":"2025-11-22T10:38:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.335074 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.335175 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.335203 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.335236 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.335256 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:49Z","lastTransitionTime":"2025-11-22T10:38:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.437619 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.437675 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.437694 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.437720 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.437738 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:49Z","lastTransitionTime":"2025-11-22T10:38:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.447029 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:49 crc kubenswrapper[4938]: E1122 10:38:49.447181 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.447031 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:49 crc kubenswrapper[4938]: E1122 10:38:49.447312 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.447031 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:49 crc kubenswrapper[4938]: E1122 10:38:49.447407 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.540509 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.541040 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.541249 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.541447 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.541661 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:49Z","lastTransitionTime":"2025-11-22T10:38:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.650859 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.650975 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.650994 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.651020 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.651037 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:49Z","lastTransitionTime":"2025-11-22T10:38:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.754402 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.754453 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.754465 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.754491 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.754505 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:49Z","lastTransitionTime":"2025-11-22T10:38:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.857841 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.857958 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.857985 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.858017 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.858080 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:49Z","lastTransitionTime":"2025-11-22T10:38:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.960759 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.960836 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.960849 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.960874 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:49 crc kubenswrapper[4938]: I1122 10:38:49.960888 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:49Z","lastTransitionTime":"2025-11-22T10:38:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.064393 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.064473 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.064483 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.064500 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.064509 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:50Z","lastTransitionTime":"2025-11-22T10:38:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.168339 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.168409 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.168428 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.168458 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.168480 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:50Z","lastTransitionTime":"2025-11-22T10:38:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.271852 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.272212 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.272225 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.272244 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.272254 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:50Z","lastTransitionTime":"2025-11-22T10:38:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.375048 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.375101 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.375119 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.375137 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.375149 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:50Z","lastTransitionTime":"2025-11-22T10:38:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.446537 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:50 crc kubenswrapper[4938]: E1122 10:38:50.446769 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.477896 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.477969 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.477978 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.477993 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.478002 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:50Z","lastTransitionTime":"2025-11-22T10:38:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.580524 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.580569 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.580584 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.580605 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.580620 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:50Z","lastTransitionTime":"2025-11-22T10:38:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.684081 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.684175 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.684203 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.684234 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.684259 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:50Z","lastTransitionTime":"2025-11-22T10:38:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.787958 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.788019 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.788037 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.788057 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.788082 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:50Z","lastTransitionTime":"2025-11-22T10:38:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.891168 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.891232 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.891249 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.891283 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.891300 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:50Z","lastTransitionTime":"2025-11-22T10:38:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.994986 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.995040 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.995055 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.995077 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:50 crc kubenswrapper[4938]: I1122 10:38:50.995093 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:50Z","lastTransitionTime":"2025-11-22T10:38:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.097815 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.097894 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.097926 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.097953 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.097968 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:51Z","lastTransitionTime":"2025-11-22T10:38:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.197505 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.199711 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.199752 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.199763 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.199782 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.199798 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:51Z","lastTransitionTime":"2025-11-22T10:38:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.209446 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.221365 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.247009 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:37Z\\\",\\\"message\\\":\\\"ode-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:38:36.709690 6473 lb_config.go:1031] Cluster endpoints for openshift-authentication/oauth-openshift for network=default are: map[]\\\\nI1122 10:38:36.709645 6473 services_controller.go:434] Service openshift-machine-config-operator/machine-config-controller retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{machine-config-controller openshift-machine-config-operator aa30290d-3a39-43ba-a212-6439bd680987 4486 0 2025-02-23 05:12:25 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[k8s-app:machine-config-controller] map[include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:mcc-proxy-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0079c62db \\\\u003cnil\\\\u003e}] [] 
[]},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Na\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-
access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.258073 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.268939 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.280427 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.293827 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.302494 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.302533 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.302542 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.302557 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.302567 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:51Z","lastTransitionTime":"2025-11-22T10:38:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.306636 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.317436 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.332790 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.345039 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.360238 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.371994 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.384202 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.398481 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operato
r@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.404903 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.404947 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:51 crc 
kubenswrapper[4938]: I1122 10:38:51.404955 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.404968 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.404977 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:51Z","lastTransitionTime":"2025-11-22T10:38:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.413650 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.427054 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.446447 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.446520 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:51 crc kubenswrapper[4938]: E1122 10:38:51.446572 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:51 crc kubenswrapper[4938]: E1122 10:38:51.446732 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.446763 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:51 crc kubenswrapper[4938]: E1122 10:38:51.446959 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.507271 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.507331 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.507342 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.507360 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.507371 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:51Z","lastTransitionTime":"2025-11-22T10:38:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.609271 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.609326 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.609342 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.609363 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.609380 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:51Z","lastTransitionTime":"2025-11-22T10:38:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.711632 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.711680 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.711689 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.711704 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.711714 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:51Z","lastTransitionTime":"2025-11-22T10:38:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.813554 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.813605 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.813619 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.813636 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.813648 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:51Z","lastTransitionTime":"2025-11-22T10:38:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.915620 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.915660 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.915670 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.915685 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:51 crc kubenswrapper[4938]: I1122 10:38:51.915694 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:51Z","lastTransitionTime":"2025-11-22T10:38:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.018851 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.018951 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.018970 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.018994 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.019011 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:52Z","lastTransitionTime":"2025-11-22T10:38:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.121871 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.121988 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.122016 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.122050 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.122073 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:52Z","lastTransitionTime":"2025-11-22T10:38:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.225098 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.225166 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.225184 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.225208 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.225228 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:52Z","lastTransitionTime":"2025-11-22T10:38:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.327969 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.328045 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.328057 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.328076 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.328089 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:52Z","lastTransitionTime":"2025-11-22T10:38:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.431113 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.431159 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.431168 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.431183 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.431195 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:52Z","lastTransitionTime":"2025-11-22T10:38:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.446970 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:52 crc kubenswrapper[4938]: E1122 10:38:52.447117 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.447739 4938 scope.go:117] "RemoveContainer" containerID="6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.534348 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.534390 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.534398 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.534413 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.534424 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:52Z","lastTransitionTime":"2025-11-22T10:38:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.638300 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.638364 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.638385 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.638415 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.638437 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:52Z","lastTransitionTime":"2025-11-22T10:38:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.742113 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.742172 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.742189 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.742214 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.742231 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:52Z","lastTransitionTime":"2025-11-22T10:38:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.844534 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.844570 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.844580 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.844596 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.844606 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:52Z","lastTransitionTime":"2025-11-22T10:38:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.864108 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/1.log" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.866387 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32"} Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.867360 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.893660 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:52Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.916136 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:52Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.937141 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:52Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.946417 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.946703 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.946718 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.946734 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.946745 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:52Z","lastTransitionTime":"2025-11-22T10:38:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.954083 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:52Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.974185 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:37Z\\\",\\\"message\\\":\\\"ode-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:38:36.709690 6473 lb_config.go:1031] Cluster endpoints for openshift-authentication/oauth-openshift for network=default are: map[]\\\\nI1122 10:38:36.709645 6473 services_controller.go:434] Service openshift-machine-config-operator/machine-config-controller retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{machine-config-controller openshift-machine-config-operator aa30290d-3a39-43ba-a212-6439bd680987 4486 0 2025-02-23 05:12:25 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[k8s-app:machine-config-controller] map[include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:mcc-proxy-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0079c62db \\\\u003cnil\\\\u003e}] [] 
[]},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Na\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168
.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:52Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.986973 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:52Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:52 crc kubenswrapper[4938]: I1122 10:38:52.995998 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:52Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.006235 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.017901 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.029522 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.041022 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.048446 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.048487 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.048499 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.048515 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.048528 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:53Z","lastTransitionTime":"2025-11-22T10:38:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.057677 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":
\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46
d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernet
es.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.069958 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountP
ath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.084477 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.096670 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is 
after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.111834 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\
\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.123250 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc0
2e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.150964 4938 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.151008 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.151019 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.151036 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.151047 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:53Z","lastTransitionTime":"2025-11-22T10:38:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.252933 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.252997 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.253015 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.253037 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.253055 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:53Z","lastTransitionTime":"2025-11-22T10:38:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.355948 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.356003 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.356017 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.356036 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.356049 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:53Z","lastTransitionTime":"2025-11-22T10:38:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.446647 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.446689 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.446726 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:53 crc kubenswrapper[4938]: E1122 10:38:53.446787 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:53 crc kubenswrapper[4938]: E1122 10:38:53.446856 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:53 crc kubenswrapper[4938]: E1122 10:38:53.447029 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.458663 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.458711 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.458722 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.458735 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.458743 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:53Z","lastTransitionTime":"2025-11-22T10:38:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.561576 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.561617 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.561628 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.561645 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.561659 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:53Z","lastTransitionTime":"2025-11-22T10:38:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.664029 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.664075 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.664085 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.664100 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.664112 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:53Z","lastTransitionTime":"2025-11-22T10:38:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.766763 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.766817 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.766836 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.766857 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.766873 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:53Z","lastTransitionTime":"2025-11-22T10:38:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.871765 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.871860 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.871899 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.871931 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.871943 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:53Z","lastTransitionTime":"2025-11-22T10:38:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.874498 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/2.log" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.875152 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/1.log" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.877855 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerID="d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32" exitCode=1 Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.877899 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32"} Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.877950 4938 scope.go:117] "RemoveContainer" containerID="6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.878566 4938 scope.go:117] "RemoveContainer" containerID="d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32" Nov 22 10:38:53 crc kubenswrapper[4938]: E1122 10:38:53.878709 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.895517 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.906729 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.918750 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.935190 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\
\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.950441 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.960971 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.970863 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.974255 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.974292 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.974304 4938 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.974322 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.974335 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:53Z","lastTransitionTime":"2025-11-22T10:38:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.981097 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7a
c3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:53 crc kubenswrapper[4938]: I1122 10:38:53.996050 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:53Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.011168 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.020748 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.029853 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.041783 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.058304 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6099e9a52c3af6288494c5fbe4686e036dce8ceba7ada4ca0e05ba6bab15df72\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:37Z\\\",\\\"message\\\":\\\"ode-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:36Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:38:36.709690 6473 lb_config.go:1031] Cluster endpoints for openshift-authentication/oauth-openshift for network=default are: map[]\\\\nI1122 10:38:36.709645 6473 services_controller.go:434] Service openshift-machine-config-operator/machine-config-controller retrieved from lister for network=default: \\\\u0026Service{ObjectMeta:{machine-config-controller openshift-machine-config-operator aa30290d-3a39-43ba-a212-6439bd680987 4486 0 2025-02-23 05:12:25 +0000 UTC \\\\u003cnil\\\\u003e \\\\u003cnil\\\\u003e map[k8s-app:machine-config-controller] map[include.release.openshift.io/ibm-cloud-managed:true include.release.openshift.io/self-managed-high-availability:true include.release.openshift.io/single-node-developer:true service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-secret-name:mcc-proxy-tls service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc0079c62db \\\\u003cnil\\\\u003e}] [] 
[]},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Na\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:53Z\\\",\\\"message\\\":\\\" event on pod openshift-image-registry/node-ca-8l8nr\\\\nI1122 10:38:53.317793 6703 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-s7w5f in node crc\\\\nI1122 10:38:53.317575 6703 services_controller.go:360] Finished syncing service controller-manager on namespace openshift-controller-manager for network=default : 2.674971ms\\\\nI1122 10:38:53.317825 6703 services_controller.go:356] Processing sync for service openshift-kube-storage-version-migrator-operator/metrics for network=default\\\\nI1122 10:38:53.317751 6703 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1122 10:38:53.317962 6703 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-multus/multus-admission-controller\\\\\\\"}\\\\nI1122 10:38:53.317992 6703 services_controller.go:360] Finished syncing service multus-admission-controller on namespace openshift-multus for network=default : 1.112369ms\\\\nI1122 10:38:53.318019 6703 services_controller.go:356] Processing sync for service openshift-machine-api/control-plane-machine-set-operator for network=default\\\\nI1122 10:38:53.317734 6703 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1122 10:38:53.318067 6703 ovn.go:134] Ensuring 
zo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.066795 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\"
:\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.076435 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/ku
bernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.077189 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.077218 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.077228 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.077245 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.077259 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:54Z","lastTransitionTime":"2025-11-22T10:38:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.087492 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.179019 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.179070 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.179082 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.179102 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.179113 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:54Z","lastTransitionTime":"2025-11-22T10:38:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.281150 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.281203 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.281212 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.281225 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.281233 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:54Z","lastTransitionTime":"2025-11-22T10:38:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.384660 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.384699 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.384710 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.384725 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.384736 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:54Z","lastTransitionTime":"2025-11-22T10:38:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.447455 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:54 crc kubenswrapper[4938]: E1122 10:38:54.447601 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.486713 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.486748 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.486756 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.486773 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.486781 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:54Z","lastTransitionTime":"2025-11-22T10:38:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.588560 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.588632 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.588654 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.588678 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.588695 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:54Z","lastTransitionTime":"2025-11-22T10:38:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.692399 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.692442 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.692452 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.692467 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.692477 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:54Z","lastTransitionTime":"2025-11-22T10:38:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.795203 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.795242 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.795251 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.795268 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.795278 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:54Z","lastTransitionTime":"2025-11-22T10:38:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.882488 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/2.log" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.885567 4938 scope.go:117] "RemoveContainer" containerID="d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32" Nov 22 10:38:54 crc kubenswrapper[4938]: E1122 10:38:54.885718 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.898132 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.898318 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.898338 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.898347 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.898362 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.898374 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:54Z","lastTransitionTime":"2025-11-22T10:38:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.909321 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"o
vnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.928263 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2
573520405c71063bc0c19a32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:53Z\\\",\\\"message\\\":\\\" event on pod openshift-image-registry/node-ca-8l8nr\\\\nI1122 10:38:53.317793 6703 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-s7w5f in node crc\\\\nI1122 10:38:53.317575 6703 services_controller.go:360] Finished syncing service controller-manager on namespace openshift-controller-manager for network=default : 2.674971ms\\\\nI1122 10:38:53.317825 6703 services_controller.go:356] Processing sync for service openshift-kube-storage-version-migrator-operator/metrics for network=default\\\\nI1122 10:38:53.317751 6703 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1122 10:38:53.317962 6703 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-multus/multus-admission-controller\\\\\\\"}\\\\nI1122 10:38:53.317992 6703 services_controller.go:360] Finished syncing service multus-admission-controller on namespace openshift-multus for network=default : 1.112369ms\\\\nI1122 10:38:53.318019 6703 services_controller.go:356] Processing sync for service openshift-machine-api/control-plane-machine-set-operator for network=default\\\\nI1122 10:38:53.317734 6703 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1122 10:38:53.318067 6703 ovn.go:134] Ensuring zo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.937527 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.947858 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 
10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.958679 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.968781 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.978592 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:54 crc kubenswrapper[4938]: I1122 10:38:54.990873 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:54Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.001519 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.001767 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.002041 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.002169 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.002287 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:55Z","lastTransitionTime":"2025-11-22T10:38:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.002780 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eead
a12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\"
,\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\
\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:55Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.013581 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\
"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:55Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.024812 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:55Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.034768 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:55Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.044736 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:55Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.057832 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operato
r@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:55Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.069186 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:55Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.079291 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:55Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.104746 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.104806 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.104816 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.104830 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.104838 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:55Z","lastTransitionTime":"2025-11-22T10:38:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.207280 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.207308 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.207316 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.207329 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.207337 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:55Z","lastTransitionTime":"2025-11-22T10:38:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.310324 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.310367 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.310378 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.310395 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.310406 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:55Z","lastTransitionTime":"2025-11-22T10:38:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.413187 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.413230 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.413239 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.413255 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.413264 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:55Z","lastTransitionTime":"2025-11-22T10:38:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.446778 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.446801 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:55 crc kubenswrapper[4938]: E1122 10:38:55.446893 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.446929 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:55 crc kubenswrapper[4938]: E1122 10:38:55.447074 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:55 crc kubenswrapper[4938]: E1122 10:38:55.447204 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.515357 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.515399 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.515411 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.515428 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.515438 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:55Z","lastTransitionTime":"2025-11-22T10:38:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.618452 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.618497 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.618507 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.618522 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.618533 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:55Z","lastTransitionTime":"2025-11-22T10:38:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.721117 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.721157 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.721167 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.721181 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.721191 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:55Z","lastTransitionTime":"2025-11-22T10:38:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.824550 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.824613 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.824633 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.824660 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.824678 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:55Z","lastTransitionTime":"2025-11-22T10:38:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.928065 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.928103 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.928112 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.928126 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:55 crc kubenswrapper[4938]: I1122 10:38:55.928136 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:55Z","lastTransitionTime":"2025-11-22T10:38:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.031190 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.031242 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.031253 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.031273 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.031285 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:56Z","lastTransitionTime":"2025-11-22T10:38:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.134183 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.134223 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.134232 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.134248 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.134259 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:56Z","lastTransitionTime":"2025-11-22T10:38:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.236735 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.236781 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.236835 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.236854 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.236867 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:56Z","lastTransitionTime":"2025-11-22T10:38:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.338661 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.338701 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.338712 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.338725 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.338735 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:56Z","lastTransitionTime":"2025-11-22T10:38:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.441971 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.442062 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.442081 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.442105 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.442127 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:56Z","lastTransitionTime":"2025-11-22T10:38:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.447297 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:38:56 crc kubenswrapper[4938]: E1122 10:38:56.447458 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.544661 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.544905 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.545024 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.545110 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.545173 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:56Z","lastTransitionTime":"2025-11-22T10:38:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.647691 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.647750 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.647766 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.647790 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.647808 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:56Z","lastTransitionTime":"2025-11-22T10:38:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.750548 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.750595 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.750608 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.750627 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.750637 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:56Z","lastTransitionTime":"2025-11-22T10:38:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.852159 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.852209 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.852219 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.852237 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.852249 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:56Z","lastTransitionTime":"2025-11-22T10:38:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.955026 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.955100 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.955125 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.955158 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:56 crc kubenswrapper[4938]: I1122 10:38:56.955179 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:56Z","lastTransitionTime":"2025-11-22T10:38:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.058340 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.058481 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.058544 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.058577 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.058594 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:57Z","lastTransitionTime":"2025-11-22T10:38:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.161876 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.161961 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.161979 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.162003 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.162020 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:57Z","lastTransitionTime":"2025-11-22T10:38:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.265261 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.265530 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.265613 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.265701 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.265825 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:57Z","lastTransitionTime":"2025-11-22T10:38:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.369124 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.369456 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.369556 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.369639 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.369726 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:57Z","lastTransitionTime":"2025-11-22T10:38:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.447303 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.447343 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.447900 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:57 crc kubenswrapper[4938]: E1122 10:38:57.448104 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:57 crc kubenswrapper[4938]: E1122 10:38:57.448241 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:57 crc kubenswrapper[4938]: E1122 10:38:57.448410 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.472158 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.472199 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.472211 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.472228 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.472242 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:57Z","lastTransitionTime":"2025-11-22T10:38:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.574236 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.574281 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.574292 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.574308 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.574319 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:57Z","lastTransitionTime":"2025-11-22T10:38:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.677058 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.677109 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.677125 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.677150 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.677173 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:57Z","lastTransitionTime":"2025-11-22T10:38:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.779287 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.779542 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.779644 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.779741 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.779862 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:57Z","lastTransitionTime":"2025-11-22T10:38:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.883101 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.883136 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.883144 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.883158 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.883170 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:57Z","lastTransitionTime":"2025-11-22T10:38:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.900781 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f"
Nov 22 10:38:57 crc kubenswrapper[4938]: E1122 10:38:57.901025 4938 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 10:38:57 crc kubenswrapper[4938]: E1122 10:38:57.901610 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs podName:7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c nodeName:}" failed. No retries permitted until 2025-11-22 10:39:29.901580131 +0000 UTC m=+102.369417560 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs") pod "network-metrics-daemon-s7w5f" (UID: "7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.985838 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.986443 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.986642 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.986971 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:38:57 crc kubenswrapper[4938]: I1122 10:38:57.987159 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:57Z","lastTransitionTime":"2025-11-22T10:38:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.090627 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.090673 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.090685 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.090701 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.090710 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:58Z","lastTransitionTime":"2025-11-22T10:38:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.192732 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.193004 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.193120 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.193219 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.193298 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:58Z","lastTransitionTime":"2025-11-22T10:38:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.296057 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.296115 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.296132 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.296157 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.296178 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:58Z","lastTransitionTime":"2025-11-22T10:38:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.398759 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.398799 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.398809 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.398824 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.398835 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:58Z","lastTransitionTime":"2025-11-22T10:38:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.447111 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:38:58 crc kubenswrapper[4938]: E1122 10:38:58.447262 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.465776 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.495191 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.509496 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.509530 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.509539 4938 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.509554 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.509564 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:58Z","lastTransitionTime":"2025-11-22T10:38:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.532810 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2
573520405c71063bc0c19a32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:53Z\\\",\\\"message\\\":\\\" event on pod openshift-image-registry/node-ca-8l8nr\\\\nI1122 10:38:53.317793 6703 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-s7w5f in node crc\\\\nI1122 10:38:53.317575 6703 services_controller.go:360] Finished syncing service controller-manager on namespace openshift-controller-manager for network=default : 2.674971ms\\\\nI1122 10:38:53.317825 6703 services_controller.go:356] Processing sync for service openshift-kube-storage-version-migrator-operator/metrics for network=default\\\\nI1122 10:38:53.317751 6703 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1122 10:38:53.317962 6703 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-multus/multus-admission-controller\\\\\\\"}\\\\nI1122 10:38:53.317992 6703 services_controller.go:360] Finished syncing service multus-admission-controller on namespace openshift-multus for network=default : 1.112369ms\\\\nI1122 10:38:53.318019 6703 services_controller.go:356] Processing sync for service openshift-machine-api/control-plane-machine-set-operator for network=default\\\\nI1122 10:38:53.317734 6703 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1122 10:38:53.318067 6703 ovn.go:134] Ensuring zo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.546577 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.556992 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 
10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.570863 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.588403 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.600533 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.609058 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.611734 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.611781 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.611794 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.611810 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.611823 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:58Z","lastTransitionTime":"2025-11-22T10:38:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.626527 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eead
a12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\"
,\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\
\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.637577 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\
"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.646341 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.655651 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.665450 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.678430 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operato
r@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.692304 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.701682 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.713615 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.713651 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.713666 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.713683 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.713697 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:58Z","lastTransitionTime":"2025-11-22T10:38:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.815088 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.815144 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.815153 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.815166 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.815175 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:58Z","lastTransitionTime":"2025-11-22T10:38:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.917705 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.917974 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.918056 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.918146 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:58 crc kubenswrapper[4938]: I1122 10:38:58.918216 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:58Z","lastTransitionTime":"2025-11-22T10:38:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.020636 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.020670 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.020679 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.020693 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.020701 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.123210 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.123260 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.123271 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.123284 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.123293 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.142146 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.142366 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.142513 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.142652 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.142777 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: E1122 10:38:59.155878 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.159965 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.160028 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.160041 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.160058 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.160071 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: E1122 10:38:59.178110 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.181396 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.181430 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.181440 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.181457 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.181469 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: E1122 10:38:59.192087 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.196164 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.196221 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.196230 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.196245 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.196254 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: E1122 10:38:59.206643 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.209979 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.210325 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.210428 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.210528 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.210613 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: E1122 10:38:59.221481 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:38:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:38:59 crc kubenswrapper[4938]: E1122 10:38:59.221630 4938 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.224756 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.224829 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.224849 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.224873 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.224891 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.326634 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.326867 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.326995 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.327116 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.327226 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.429559 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.429603 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.429616 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.429631 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.429643 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.447195 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.447302 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:38:59 crc kubenswrapper[4938]: E1122 10:38:59.447468 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.447495 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:38:59 crc kubenswrapper[4938]: E1122 10:38:59.447599 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:38:59 crc kubenswrapper[4938]: E1122 10:38:59.447708 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.532566 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.532657 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.532679 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.532704 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.532723 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.636604 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.636672 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.636689 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.636713 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.636730 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.739883 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.739997 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.740010 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.740027 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.740038 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.841708 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.841765 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.841777 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.841796 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.841809 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.943954 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.943993 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.944001 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.944015 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:38:59 crc kubenswrapper[4938]: I1122 10:38:59.944023 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:38:59Z","lastTransitionTime":"2025-11-22T10:38:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:00 crc kubenswrapper[4938]: I1122 10:39:00.046310 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:00 crc kubenswrapper[4938]: I1122 10:39:00.046350 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:00 crc kubenswrapper[4938]: I1122 10:39:00.046361 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:00 crc kubenswrapper[4938]: I1122 10:39:00.046376 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:00 crc kubenswrapper[4938]: I1122 10:39:00.046388 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:00Z","lastTransitionTime":"2025-11-22T10:39:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:00 crc kubenswrapper[4938]: I1122 10:39:00.148598 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:00 crc kubenswrapper[4938]: I1122 10:39:00.148637 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:00 crc kubenswrapper[4938]: I1122 10:39:00.148646 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:00 crc kubenswrapper[4938]: I1122 10:39:00.148659 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:00 crc kubenswrapper[4938]: I1122 10:39:00.148668 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:00Z","lastTransitionTime":"2025-11-22T10:39:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 22 10:39:00 crc kubenswrapper[4938]: I1122 10:39:00.447401 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:39:00 crc kubenswrapper[4938]: E1122 10:39:00.447529 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:39:01 crc kubenswrapper[4938]: I1122 10:39:01.446992 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:39:01 crc kubenswrapper[4938]: E1122 10:39:01.447125 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:39:01 crc kubenswrapper[4938]: I1122 10:39:01.446992 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:39:01 crc kubenswrapper[4938]: E1122 10:39:01.447299 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:39:01 crc kubenswrapper[4938]: I1122 10:39:01.447415 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f"
Nov 22 10:39:01 crc kubenswrapper[4938]: E1122 10:39:01.447551 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c"
Nov 22 10:39:02 crc kubenswrapper[4938]: I1122 10:39:02.446653 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:39:02 crc kubenswrapper[4938]: E1122 10:39:02.446797 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:39:03 crc kubenswrapper[4938]: I1122 10:39:03.447307 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:39:03 crc kubenswrapper[4938]: I1122 10:39:03.447434 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:39:03 crc kubenswrapper[4938]: E1122 10:39:03.447676 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:39:03 crc kubenswrapper[4938]: E1122 10:39:03.447467 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:39:03 crc kubenswrapper[4938]: I1122 10:39:03.447718 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f"
Nov 22 10:39:03 crc kubenswrapper[4938]: E1122 10:39:03.447902 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c"
Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.446742 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:39:04 crc kubenswrapper[4938]: E1122 10:39:04.446896 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Has your network provider started?"} Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.668511 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.668576 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.668594 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.668618 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.668636 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:04Z","lastTransitionTime":"2025-11-22T10:39:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.774050 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.774114 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.774144 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.774171 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.774190 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:04Z","lastTransitionTime":"2025-11-22T10:39:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.876520 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.876574 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.876590 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.876610 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.876626 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:04Z","lastTransitionTime":"2025-11-22T10:39:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.978970 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.979015 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.979026 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.979042 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:04 crc kubenswrapper[4938]: I1122 10:39:04.979054 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:04Z","lastTransitionTime":"2025-11-22T10:39:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.081255 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.081293 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.081302 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.081316 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.081326 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:05Z","lastTransitionTime":"2025-11-22T10:39:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.185428 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.185476 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.185485 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.185499 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.185508 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:05Z","lastTransitionTime":"2025-11-22T10:39:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.288060 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.288110 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.288122 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.288140 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.288157 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:05Z","lastTransitionTime":"2025-11-22T10:39:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.390303 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.390400 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.390419 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.390446 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.390467 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:05Z","lastTransitionTime":"2025-11-22T10:39:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.447425 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.447494 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.447526 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:05 crc kubenswrapper[4938]: E1122 10:39:05.447599 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:05 crc kubenswrapper[4938]: E1122 10:39:05.447694 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:05 crc kubenswrapper[4938]: E1122 10:39:05.447820 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.493046 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.493087 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.493096 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.493111 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.493120 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:05Z","lastTransitionTime":"2025-11-22T10:39:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.595513 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.595606 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.595631 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.595666 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.595846 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:05Z","lastTransitionTime":"2025-11-22T10:39:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.699442 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.699516 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.699533 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.699560 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.699577 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:05Z","lastTransitionTime":"2025-11-22T10:39:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.801831 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.801902 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.801958 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.801985 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.802004 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:05Z","lastTransitionTime":"2025-11-22T10:39:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.904161 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.904214 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.904230 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.904252 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:05 crc kubenswrapper[4938]: I1122 10:39:05.904269 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:05Z","lastTransitionTime":"2025-11-22T10:39:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.007020 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.007109 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.007138 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.007183 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.007209 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:06Z","lastTransitionTime":"2025-11-22T10:39:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.110598 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.111027 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.111211 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.111382 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.111535 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:06Z","lastTransitionTime":"2025-11-22T10:39:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.214206 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.214263 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.214282 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.214302 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.214313 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:06Z","lastTransitionTime":"2025-11-22T10:39:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.317980 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.318021 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.318030 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.318044 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.318055 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:06Z","lastTransitionTime":"2025-11-22T10:39:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.421282 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.421341 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.421380 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.421420 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.421443 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:06Z","lastTransitionTime":"2025-11-22T10:39:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.448337 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:06 crc kubenswrapper[4938]: E1122 10:39:06.448545 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.524316 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.524399 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.524419 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.524445 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.524463 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:06Z","lastTransitionTime":"2025-11-22T10:39:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.627413 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.627484 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.627493 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.627506 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.627518 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:06Z","lastTransitionTime":"2025-11-22T10:39:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.730029 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.730190 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.730213 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.730239 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.730260 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:06Z","lastTransitionTime":"2025-11-22T10:39:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.832669 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.832748 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.832767 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.832791 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.832810 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:06Z","lastTransitionTime":"2025-11-22T10:39:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.936328 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.936407 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.936424 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.936451 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:06 crc kubenswrapper[4938]: I1122 10:39:06.936472 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:06Z","lastTransitionTime":"2025-11-22T10:39:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.039046 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.039145 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.039164 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.039235 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.039282 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:07Z","lastTransitionTime":"2025-11-22T10:39:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.141964 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.142037 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.142064 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.142095 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.142117 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:07Z","lastTransitionTime":"2025-11-22T10:39:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.244247 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.244354 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.244391 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.244407 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.244418 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:07Z","lastTransitionTime":"2025-11-22T10:39:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.346878 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.346943 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.346951 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.346964 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.346973 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:07Z","lastTransitionTime":"2025-11-22T10:39:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.446721 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.446750 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.446721 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:07 crc kubenswrapper[4938]: E1122 10:39:07.446846 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:07 crc kubenswrapper[4938]: E1122 10:39:07.446932 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:07 crc kubenswrapper[4938]: E1122 10:39:07.446999 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.448597 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.448617 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.448629 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.448638 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.448647 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:07Z","lastTransitionTime":"2025-11-22T10:39:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.551180 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.551235 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.551255 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.551279 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.551298 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:07Z","lastTransitionTime":"2025-11-22T10:39:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.654696 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.654738 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.654756 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.654776 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.654793 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:07Z","lastTransitionTime":"2025-11-22T10:39:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.758656 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.758713 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.758730 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.758754 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.758771 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:07Z","lastTransitionTime":"2025-11-22T10:39:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.864372 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.864430 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.864442 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.864458 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.864487 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:07Z","lastTransitionTime":"2025-11-22T10:39:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.921928 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j67hq_671da3f6-347d-4f86-890d-155ef844b1f6/kube-multus/0.log" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.921968 4938 generic.go:334] "Generic (PLEG): container finished" podID="671da3f6-347d-4f86-890d-155ef844b1f6" containerID="43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb" exitCode=1 Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.921995 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j67hq" event={"ID":"671da3f6-347d-4f86-890d-155ef844b1f6","Type":"ContainerDied","Data":"43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb"} Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.922310 4938 scope.go:117] "RemoveContainer" containerID="43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.936414 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"2025-11-22T10:38:20+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7\\\\n2025-11-22T10:38:20+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7 to /host/opt/cni/bin/\\\\n2025-11-22T10:38:22Z [verbose] multus-daemon started\\\\n2025-11-22T10:38:22Z [verbose] Readiness Indicator file 
check\\\\n2025-11-22T10:39:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.952098 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.965151 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.966299 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.966330 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.966342 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.966357 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.966367 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:07Z","lastTransitionTime":"2025-11-22T10:39:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.977287 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.989294 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f4
2cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure 
cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:07 crc kubenswrapper[4938]: I1122 10:39:07.999654 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.011233 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.026755 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.040926 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.054733 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.065893 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.068575 4938 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.068607 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.068617 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.068632 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.068644 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:08Z","lastTransitionTime":"2025-11-22T10:39:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.081964 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.097239 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee12
20d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.109178 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.125362 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.135513 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.157618 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2
573520405c71063bc0c19a32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:53Z\\\",\\\"message\\\":\\\" event on pod openshift-image-registry/node-ca-8l8nr\\\\nI1122 10:38:53.317793 6703 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-s7w5f in node crc\\\\nI1122 10:38:53.317575 6703 services_controller.go:360] Finished syncing service controller-manager on namespace openshift-controller-manager for network=default : 2.674971ms\\\\nI1122 10:38:53.317825 6703 services_controller.go:356] Processing sync for service openshift-kube-storage-version-migrator-operator/metrics for network=default\\\\nI1122 10:38:53.317751 6703 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1122 10:38:53.317962 6703 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-multus/multus-admission-controller\\\\\\\"}\\\\nI1122 10:38:53.317992 6703 services_controller.go:360] Finished syncing service multus-admission-controller on namespace openshift-multus for network=default : 1.112369ms\\\\nI1122 10:38:53.318019 6703 services_controller.go:356] Processing sync for service openshift-machine-api/control-plane-machine-set-operator for network=default\\\\nI1122 10:38:53.317734 6703 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1122 10:38:53.318067 6703 ovn.go:134] Ensuring zo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.170702 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.170961 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.171101 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.171219 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.171586 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:08Z","lastTransitionTime":"2025-11-22T10:39:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.273854 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.274087 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.274211 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.274316 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.274500 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:08Z","lastTransitionTime":"2025-11-22T10:39:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.376574 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.376656 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.376678 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.376713 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.376736 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:08Z","lastTransitionTime":"2025-11-22T10:39:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.446868 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:08 crc kubenswrapper[4938]: E1122 10:39:08.447013 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.447773 4938 scope.go:117] "RemoveContainer" containerID="d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32" Nov 22 10:39:08 crc kubenswrapper[4938]: E1122 10:39:08.447927 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.464262 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.477009 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.479460 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.479498 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.479537 4938 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.479553 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.479563 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:08Z","lastTransitionTime":"2025-11-22T10:39:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.489876 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.501816 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22
T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.513183 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\
"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.523320 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers 
with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.533596 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.544060 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.560974 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:53Z\\\",\\\"message\\\":\\\" event on pod openshift-image-registry/node-ca-8l8nr\\\\nI1122 10:38:53.317793 6703 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-s7w5f in node crc\\\\nI1122 10:38:53.317575 6703 services_controller.go:360] Finished syncing service controller-manager on namespace openshift-controller-manager for network=default : 2.674971ms\\\\nI1122 10:38:53.317825 6703 services_controller.go:356] Processing sync for service openshift-kube-storage-version-migrator-operator/metrics for network=default\\\\nI1122 10:38:53.317751 6703 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1122 10:38:53.317962 6703 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-multus/multus-admission-controller\\\\\\\"}\\\\nI1122 10:38:53.317992 6703 services_controller.go:360] Finished syncing service multus-admission-controller on namespace openshift-multus for network=default : 1.112369ms\\\\nI1122 10:38:53.318019 6703 services_controller.go:356] Processing sync for service openshift-machine-api/control-plane-machine-set-operator for network=default\\\\nI1122 10:38:53.317734 6703 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1122 10:38:53.318067 6703 ovn.go:134] Ensuring 
zo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.574039 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"2025-11-22T10:38:20+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7\\\\n2025-11-22T10:38:20+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7 to /host/opt/cni/bin/\\\\n2025-11-22T10:38:22Z [verbose] multus-daemon started\\\\n2025-11-22T10:38:22Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:39:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.582152 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.582202 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.582214 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.582231 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.582244 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:08Z","lastTransitionTime":"2025-11-22T10:39:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.585460 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.595433 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.609523 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.620648 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operato
r@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.633810 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.644889 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.657724 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.684126 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.684158 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:08 crc 
kubenswrapper[4938]: I1122 10:39:08.684169 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.684183 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.684194 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:08Z","lastTransitionTime":"2025-11-22T10:39:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.788039 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.788106 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.788120 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.788145 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.788161 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:08Z","lastTransitionTime":"2025-11-22T10:39:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.890387 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.890430 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.890441 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.890454 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.890464 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:08Z","lastTransitionTime":"2025-11-22T10:39:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.927230 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j67hq_671da3f6-347d-4f86-890d-155ef844b1f6/kube-multus/0.log" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.927304 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j67hq" event={"ID":"671da3f6-347d-4f86-890d-155ef844b1f6","Type":"ContainerStarted","Data":"60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a"} Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.946040 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.961614 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.979317 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.992525 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.993781 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.993830 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.993841 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.993857 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:08 crc kubenswrapper[4938]: I1122 10:39:08.993869 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:08Z","lastTransitionTime":"2025-11-22T10:39:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.018744 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:53Z\\\",\\\"message\\\":\\\" event on pod openshift-image-registry/node-ca-8l8nr\\\\nI1122 10:38:53.317793 6703 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-s7w5f in node crc\\\\nI1122 10:38:53.317575 6703 services_controller.go:360] Finished syncing service controller-manager on namespace openshift-controller-manager for network=default : 2.674971ms\\\\nI1122 10:38:53.317825 6703 services_controller.go:356] Processing sync for service openshift-kube-storage-version-migrator-operator/metrics for network=default\\\\nI1122 10:38:53.317751 6703 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1122 10:38:53.317962 6703 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-multus/multus-admission-controller\\\\\\\"}\\\\nI1122 10:38:53.317992 6703 services_controller.go:360] Finished syncing service multus-admission-controller on namespace openshift-multus for network=default : 1.112369ms\\\\nI1122 10:38:53.318019 6703 services_controller.go:356] Processing sync for service openshift-machine-api/control-plane-machine-set-operator for network=default\\\\nI1122 10:38:53.317734 6703 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1122 10:38:53.318067 6703 ovn.go:134] Ensuring 
zo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.030078 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.044145 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.060678 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.073196 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.085276 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.096196 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.096242 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.096253 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.096268 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.096278 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.096842 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.111769 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.126227 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"2025-11-22T10:38:20+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7\\\\n2025-11-22T10:38:20+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7 to /host/opt/cni/bin/\\\\n2025-11-22T10:38:22Z [verbose] multus-daemon started\\\\n2025-11-22T10:38:22Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:39:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.139799 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.151438 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.165946 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.185182 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operato
r@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.198675 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.198719 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc 
kubenswrapper[4938]: I1122 10:39:09.198728 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.198743 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.198752 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.304832 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.304906 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.305000 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.305048 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.305075 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.407622 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.407669 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.407692 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.407718 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.407739 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.411904 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.411999 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.412022 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.412432 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.412711 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: E1122 10:39:09.433176 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 
2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.436924 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.436957 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.436974 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.436990 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.437000 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.447290 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:09 crc kubenswrapper[4938]: E1122 10:39:09.447442 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.447858 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:09 crc kubenswrapper[4938]: E1122 10:39:09.452393 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.452960 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:09 crc kubenswrapper[4938]: E1122 10:39:09.453175 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:09 crc kubenswrapper[4938]: E1122 10:39:09.452864 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 
2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.457112 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.457155 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.457169 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.457186 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.457198 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: E1122 10:39:09.469143 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 
2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.472801 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.472852 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.472864 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.472881 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.472895 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: E1122 10:39:09.488118 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 
2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.491406 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.491454 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.491483 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.491509 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.491526 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: E1122 10:39:09.504653 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:09Z is after 
2025-08-24T17:21:41Z" Nov 22 10:39:09 crc kubenswrapper[4938]: E1122 10:39:09.504765 4938 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.510122 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.510162 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.510171 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.510185 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.510201 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.612507 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.612553 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.612569 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.612589 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.612626 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.715393 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.715434 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.715448 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.715465 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.715476 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.818843 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.818950 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.818977 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.819004 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.819030 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.922489 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.922533 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.922547 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.922569 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:09 crc kubenswrapper[4938]: I1122 10:39:09.922584 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:09Z","lastTransitionTime":"2025-11-22T10:39:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.025204 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.025264 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.025282 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.025305 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.025322 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:10Z","lastTransitionTime":"2025-11-22T10:39:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.127751 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.127782 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.127789 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.127801 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.127810 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:10Z","lastTransitionTime":"2025-11-22T10:39:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.230051 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.230097 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.230108 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.230123 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.230133 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:10Z","lastTransitionTime":"2025-11-22T10:39:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.333165 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.333230 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.333246 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.333267 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.333281 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:10Z","lastTransitionTime":"2025-11-22T10:39:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.436622 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.436665 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.436675 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.436691 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.436702 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:10Z","lastTransitionTime":"2025-11-22T10:39:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.447423 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:10 crc kubenswrapper[4938]: E1122 10:39:10.447576 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.540190 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.540265 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.540286 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.540310 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.540328 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:10Z","lastTransitionTime":"2025-11-22T10:39:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.643197 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.643587 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.643597 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.643612 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.643621 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:10Z","lastTransitionTime":"2025-11-22T10:39:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.746763 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.746811 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.746823 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.746841 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.746855 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:10Z","lastTransitionTime":"2025-11-22T10:39:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.848725 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.848798 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.848817 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.848842 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.848859 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:10Z","lastTransitionTime":"2025-11-22T10:39:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.950599 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.950669 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.950692 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.950725 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:10 crc kubenswrapper[4938]: I1122 10:39:10.950747 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:10Z","lastTransitionTime":"2025-11-22T10:39:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.053869 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.053973 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.053994 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.054021 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.054041 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:11Z","lastTransitionTime":"2025-11-22T10:39:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.156843 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.156901 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.156945 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.156964 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.156976 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:11Z","lastTransitionTime":"2025-11-22T10:39:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.260323 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.260396 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.260409 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.260428 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.260442 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:11Z","lastTransitionTime":"2025-11-22T10:39:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.362336 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.362367 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.362375 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.362388 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.362404 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:11Z","lastTransitionTime":"2025-11-22T10:39:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.446954 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.447015 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.447018 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:11 crc kubenswrapper[4938]: E1122 10:39:11.447172 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:11 crc kubenswrapper[4938]: E1122 10:39:11.447259 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:11 crc kubenswrapper[4938]: E1122 10:39:11.447361 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.465144 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.465227 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.465254 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.465286 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.465305 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:11Z","lastTransitionTime":"2025-11-22T10:39:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.569100 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.569140 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.569151 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.569164 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.569174 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:11Z","lastTransitionTime":"2025-11-22T10:39:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.671608 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.671646 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.671653 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.671667 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:11 crc kubenswrapper[4938]: I1122 10:39:11.671677 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:11Z","lastTransitionTime":"2025-11-22T10:39:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:12 crc kubenswrapper[4938]: I1122 10:39:12.447034 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:39:12 crc kubenswrapper[4938]: E1122 10:39:12.447230 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:39:12 crc kubenswrapper[4938]: I1122 10:39:12.458717 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Nov 22 10:39:13 crc kubenswrapper[4938]: I1122 10:39:13.447352 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:39:13 crc kubenswrapper[4938]: I1122 10:39:13.447352 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f"
Nov 22 10:39:13 crc kubenswrapper[4938]: E1122 10:39:13.447553 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:39:13 crc kubenswrapper[4938]: I1122 10:39:13.447366 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:39:13 crc kubenswrapper[4938]: E1122 10:39:13.447703 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c"
Nov 22 10:39:13 crc kubenswrapper[4938]: E1122 10:39:13.447852 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:39:14 crc kubenswrapper[4938]: I1122 10:39:14.446520 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:39:14 crc kubenswrapper[4938]: E1122 10:39:14.446707 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:39:15 crc kubenswrapper[4938]: I1122 10:39:15.272796 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:39:15 crc kubenswrapper[4938]: I1122 10:39:15.272972 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.273010 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:19.272988092 +0000 UTC m=+151.740825501 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:39:15 crc kubenswrapper[4938]: I1122 10:39:15.273044 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.273168 4938 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.273213 4938 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.273266 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:19.273252669 +0000 UTC m=+151.741090078 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.273290 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:19.273277919 +0000 UTC m=+151.741115338 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 10:39:15 crc kubenswrapper[4938]: I1122 10:39:15.446718 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:39:15 crc kubenswrapper[4938]: I1122 10:39:15.446795 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f"
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.447000 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:39:15 crc kubenswrapper[4938]: I1122 10:39:15.447031 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.447123 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c"
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.447278 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:39:15 crc kubenswrapper[4938]: I1122 10:39:15.474772 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:39:15 crc kubenswrapper[4938]: I1122 10:39:15.474839 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.475077 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.475094 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.475117 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.475135 4938 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.475142 4938 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.475156 4938 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.475241 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:19.475212727 +0000 UTC m=+151.943050186 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 10:39:15 crc kubenswrapper[4938]: E1122 10:39:15.475278 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:19.475262719 +0000 UTC m=+151.943100158 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.446749 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:39:16 crc kubenswrapper[4938]: E1122 10:39:16.447045 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.608267 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.608302 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.608313 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.608327 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.608337 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:16Z","lastTransitionTime":"2025-11-22T10:39:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.711091 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.711155 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.711173 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.711199 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.711216 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:16Z","lastTransitionTime":"2025-11-22T10:39:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.814162 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.814227 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.814243 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.814268 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.814289 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:16Z","lastTransitionTime":"2025-11-22T10:39:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.917023 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.917079 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.917092 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.917111 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:16 crc kubenswrapper[4938]: I1122 10:39:16.917125 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:16Z","lastTransitionTime":"2025-11-22T10:39:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.020538 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.020597 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.020613 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.020637 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.020655 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:17Z","lastTransitionTime":"2025-11-22T10:39:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.123731 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.123792 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.123810 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.123835 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.123853 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:17Z","lastTransitionTime":"2025-11-22T10:39:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.226963 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.227397 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.227606 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.227760 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.227884 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:17Z","lastTransitionTime":"2025-11-22T10:39:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.331980 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.332041 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.332059 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.332094 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.332115 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:17Z","lastTransitionTime":"2025-11-22T10:39:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.436020 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.436090 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.436116 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.436152 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.436181 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:17Z","lastTransitionTime":"2025-11-22T10:39:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.447461 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.447485 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.447615 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:17 crc kubenswrapper[4938]: E1122 10:39:17.448380 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:17 crc kubenswrapper[4938]: E1122 10:39:17.448649 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:17 crc kubenswrapper[4938]: E1122 10:39:17.448998 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.540130 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.540212 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.540232 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.540261 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.540282 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:17Z","lastTransitionTime":"2025-11-22T10:39:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.643990 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.644478 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.644664 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.644810 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.644986 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:17Z","lastTransitionTime":"2025-11-22T10:39:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.748374 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.748434 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.748451 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.748475 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.748493 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:17Z","lastTransitionTime":"2025-11-22T10:39:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.852126 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.852218 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.852260 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.852294 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.852370 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:17Z","lastTransitionTime":"2025-11-22T10:39:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.955703 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.955769 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.955787 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.955818 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:17 crc kubenswrapper[4938]: I1122 10:39:17.955838 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:17Z","lastTransitionTime":"2025-11-22T10:39:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.060610 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.060693 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.060714 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.060748 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.060778 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:18Z","lastTransitionTime":"2025-11-22T10:39:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.163877 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.163989 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.164014 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.164045 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.164069 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:18Z","lastTransitionTime":"2025-11-22T10:39:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.268170 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.268245 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.268263 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.268289 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.268308 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:18Z","lastTransitionTime":"2025-11-22T10:39:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.371355 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.371394 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.371406 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.371422 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.371433 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:18Z","lastTransitionTime":"2025-11-22T10:39:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.447132 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:18 crc kubenswrapper[4938]: E1122 10:39:18.447264 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.464597 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.474010 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.474075 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.474094 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.474122 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.474149 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:18Z","lastTransitionTime":"2025-11-22T10:39:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.481738 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.498419 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4b
b0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.517971 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.538479 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.555344 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.576700 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.576774 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.576797 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.576828 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.576851 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:18Z","lastTransitionTime":"2025-11-22T10:39:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.586861 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2
573520405c71063bc0c19a32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:53Z\\\",\\\"message\\\":\\\" event on pod openshift-image-registry/node-ca-8l8nr\\\\nI1122 10:38:53.317793 6703 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-s7w5f in node crc\\\\nI1122 10:38:53.317575 6703 services_controller.go:360] Finished syncing service controller-manager on namespace openshift-controller-manager for network=default : 2.674971ms\\\\nI1122 10:38:53.317825 6703 services_controller.go:356] Processing sync for service openshift-kube-storage-version-migrator-operator/metrics for network=default\\\\nI1122 10:38:53.317751 6703 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1122 10:38:53.317962 6703 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-multus/multus-admission-controller\\\\\\\"}\\\\nI1122 10:38:53.317992 6703 services_controller.go:360] Finished syncing service multus-admission-controller on namespace openshift-multus for network=default : 1.112369ms\\\\nI1122 10:38:53.318019 6703 services_controller.go:356] Processing sync for service openshift-machine-api/control-plane-machine-set-operator for network=default\\\\nI1122 10:38:53.317734 6703 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1122 10:38:53.318067 6703 ovn.go:134] Ensuring zo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.605886 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.621972 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 
10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.640977 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.665822 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.680439 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.680501 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.680520 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.680546 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.680564 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:18Z","lastTransitionTime":"2025-11-22T10:39:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.680969 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.694623 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.712256 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.736376 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\
\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.753876 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"2025-11-22T10:38:20+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7\\\\n2025-11-22T10:38:20+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7 to /host/opt/cni/bin/\\\\n2025-11-22T10:38:22Z [verbose] multus-daemon started\\\\n2025-11-22T10:38:22Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:39:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.767393 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.779873 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd6322b1-cd8f-48d9-8564-d186093cf4b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2666bf2a66ad2ce74076f80bb6c6f2f84bd0f8bdc182383bec5939a141c238b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:18Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.782430 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.782496 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.782522 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.782601 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.782627 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:18Z","lastTransitionTime":"2025-11-22T10:39:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.886573 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.886666 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.886687 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.886706 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.886718 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:18Z","lastTransitionTime":"2025-11-22T10:39:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.989277 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.989338 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.989376 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.989403 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:18 crc kubenswrapper[4938]: I1122 10:39:18.989422 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:18Z","lastTransitionTime":"2025-11-22T10:39:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.092590 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.093146 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.093223 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.093293 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.093355 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.196250 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.196293 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.196303 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.196320 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.196331 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.299368 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.299441 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.299464 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.299494 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.299516 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.402314 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.402387 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.402410 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.402444 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.402466 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.447395 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.447394 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.447506 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:19 crc kubenswrapper[4938]: E1122 10:39:19.447631 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:19 crc kubenswrapper[4938]: E1122 10:39:19.447794 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:19 crc kubenswrapper[4938]: E1122 10:39:19.447906 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.505342 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.505399 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.505417 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.505442 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.505462 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.609148 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.609271 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.609300 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.609330 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.609353 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.693205 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.693271 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.693297 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.693330 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.693355 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: E1122 10:39:19.714033 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:19Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.720546 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.720601 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.720620 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.720645 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.720663 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: E1122 10:39:19.736837 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:19Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.741471 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.741505 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.741518 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.741539 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.741556 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: E1122 10:39:19.759190 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:19Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.763887 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.763967 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.764000 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.764026 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.764044 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: E1122 10:39:19.783238 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:19Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.787432 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.787474 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.787486 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.787503 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.787516 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: E1122 10:39:19.800994 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:19Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:19 crc kubenswrapper[4938]: E1122 10:39:19.801157 4938 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.804323 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.804359 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.804369 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.804388 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.804400 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.907181 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.907224 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.907235 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.907253 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:19 crc kubenswrapper[4938]: I1122 10:39:19.907263 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:19Z","lastTransitionTime":"2025-11-22T10:39:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.009974 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.010031 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.010047 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.010069 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.010085 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:20Z","lastTransitionTime":"2025-11-22T10:39:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.113250 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.113303 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.113319 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.113341 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.113354 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:20Z","lastTransitionTime":"2025-11-22T10:39:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.217224 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.217293 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.217321 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.217349 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.217368 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:20Z","lastTransitionTime":"2025-11-22T10:39:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.319679 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.319735 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.319749 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.319771 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.319786 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:20Z","lastTransitionTime":"2025-11-22T10:39:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.423140 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.423219 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.423256 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.423286 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.423310 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:20Z","lastTransitionTime":"2025-11-22T10:39:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.447107 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:20 crc kubenswrapper[4938]: E1122 10:39:20.447278 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.448617 4938 scope.go:117] "RemoveContainer" containerID="d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.525502 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.525561 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.525581 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.525610 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.525630 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:20Z","lastTransitionTime":"2025-11-22T10:39:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.628219 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.628277 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.628290 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.628309 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.628323 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:20Z","lastTransitionTime":"2025-11-22T10:39:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.737683 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.737748 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.737761 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.737779 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.737794 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:20Z","lastTransitionTime":"2025-11-22T10:39:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.840981 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.841089 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.841114 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.841187 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.841211 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:20Z","lastTransitionTime":"2025-11-22T10:39:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.945464 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.945536 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.945556 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.945585 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.945607 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:20Z","lastTransitionTime":"2025-11-22T10:39:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.983548 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/2.log" Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.986456 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c"} Nov 22 10:39:20 crc kubenswrapper[4938]: I1122 10:39:20.986944 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.012541 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.028940 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.047900 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.048018 4938 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.048179 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.048191 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.048209 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.048245 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:21Z","lastTransitionTime":"2025-11-22T10:39:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.075132 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.098456 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee12
20d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.123615 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.141761 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.151177 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.151222 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.151233 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.151248 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.151258 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:21Z","lastTransitionTime":"2025-11-22T10:39:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.158600 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.189646 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:53Z\\\",\\\"message\\\":\\\" event on pod openshift-image-registry/node-ca-8l8nr\\\\nI1122 10:38:53.317793 6703 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-s7w5f in node crc\\\\nI1122 10:38:53.317575 6703 services_controller.go:360] Finished syncing service controller-manager on namespace openshift-controller-manager for network=default : 2.674971ms\\\\nI1122 10:38:53.317825 6703 services_controller.go:356] Processing sync for service openshift-kube-storage-version-migrator-operator/metrics for network=default\\\\nI1122 10:38:53.317751 6703 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1122 10:38:53.317962 6703 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-multus/multus-admission-controller\\\\\\\"}\\\\nI1122 10:38:53.317992 6703 services_controller.go:360] Finished syncing service multus-admission-controller on namespace openshift-multus for network=default : 1.112369ms\\\\nI1122 10:38:53.318019 6703 services_controller.go:356] Processing sync for service openshift-machine-api/control-plane-machine-set-operator for network=default\\\\nI1122 10:38:53.317734 6703 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1122 10:38:53.318067 6703 ovn.go:134] Ensuring 
zo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.207218 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"2025-11-22T10:38:20+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to 
/host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7\\\\n2025-11-22T10:38:20+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7 to /host/opt/cni/bin/\\\\n2025-11-22T10:38:22Z [verbose] multus-daemon started\\\\n2025-11-22T10:38:22Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:39:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.217498 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.229889 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.242860 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.253655 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.253704 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.253717 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.253736 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.253747 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:21Z","lastTransitionTime":"2025-11-22T10:39:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.260698 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting 
RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.274252 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.287027 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.306538 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOn
ly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\
\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.316416 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd6322b1-cd8f-48d9-8564-d186093cf4b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2666bf2a66ad2ce74076f80bb6c6f2f84bd0f8bdc182383bec5939a141c238b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-22T10:39:21Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.355415 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.355488 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.355502 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.355518 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.355532 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:21Z","lastTransitionTime":"2025-11-22T10:39:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.446502 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.446577 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.446502 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:21 crc kubenswrapper[4938]: E1122 10:39:21.446636 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:21 crc kubenswrapper[4938]: E1122 10:39:21.446721 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:21 crc kubenswrapper[4938]: E1122 10:39:21.446793 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.457629 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.457667 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.457678 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.457694 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.457706 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:21Z","lastTransitionTime":"2025-11-22T10:39:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.559731 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.559792 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.559800 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.559813 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.559825 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:21Z","lastTransitionTime":"2025-11-22T10:39:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.662249 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.662295 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.662311 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.662332 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.662350 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:21Z","lastTransitionTime":"2025-11-22T10:39:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.766475 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.766556 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.766572 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.766592 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.766633 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:21Z","lastTransitionTime":"2025-11-22T10:39:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.869133 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.869196 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.869221 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.869254 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.869276 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:21Z","lastTransitionTime":"2025-11-22T10:39:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.971901 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.971973 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.971986 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.972024 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.972038 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:21Z","lastTransitionTime":"2025-11-22T10:39:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.991571 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/3.log" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.992512 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/2.log" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.995488 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerID="f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c" exitCode=1 Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.995530 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c"} Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.995568 4938 scope.go:117] "RemoveContainer" containerID="d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32" Nov 22 10:39:21 crc kubenswrapper[4938]: I1122 10:39:21.996028 4938 scope.go:117] "RemoveContainer" containerID="f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c" Nov 22 10:39:21 crc kubenswrapper[4938]: E1122 10:39:21.996163 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.011305 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.027736 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.047327 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8d8a285f573b9652da8ae873a9a7f93d6d599c2573520405c71063bc0c19a32\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:38:53Z\\\",\\\"message\\\":\\\" event on pod openshift-image-registry/node-ca-8l8nr\\\\nI1122 10:38:53.317793 6703 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-s7w5f in node crc\\\\nI1122 10:38:53.317575 6703 services_controller.go:360] Finished syncing service controller-manager on namespace openshift-controller-manager for network=default : 2.674971ms\\\\nI1122 10:38:53.317825 6703 services_controller.go:356] Processing sync for service openshift-kube-storage-version-migrator-operator/metrics for network=default\\\\nI1122 10:38:53.317751 6703 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1122 10:38:53.317962 6703 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-multus/multus-admission-controller\\\\\\\"}\\\\nI1122 10:38:53.317992 6703 services_controller.go:360] Finished syncing service multus-admission-controller on namespace openshift-multus for network=default : 1.112369ms\\\\nI1122 10:38:53.318019 6703 services_controller.go:356] Processing sync for service openshift-machine-api/control-plane-machine-set-operator for network=default\\\\nI1122 10:38:53.317734 6703 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1122 10:38:53.318067 6703 ovn.go:134] Ensuring zo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:21Z\\\",\\\"message\\\":\\\"5-08-24T17:21:41Z\\\\nI1122 10:39:21.527318 7057 default_network_controller.go:776] Recording success event on pod 
openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1122 10:39:21.526775 7057 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/community-operators\\\\\\\"}\\\\nI1122 10:39:21.527455 7057 services_controller.go:360] Finished syncing service community-operators on namespace openshift-marketplace for network=default : 1.89203ms\\\\nI1122 10:39:21.527466 7057 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1122 10:39:21.527468 7057 services_controller.go:356] Processing sync for service openshift-machine-config-operator/machine-config-daemon for network=default\\\\nF1122 10:39:21.527517 7057 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed call\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.059653 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.073169 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.075709 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.075749 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.075763 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.075787 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.075802 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:22Z","lastTransitionTime":"2025-11-22T10:39:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.086542 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220
d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.098757 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.111600 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.122472 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.136620 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":
\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11
\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.150689 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"2025-11-22T10:38:20+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7\\\\n2025-11-22T10:38:20+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7 to /host/opt/cni/bin/\\\\n2025-11-22T10:38:22Z [verbose] multus-daemon started\\\\n2025-11-22T10:38:22Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:39:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.161180 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.171389 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.178263 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.178305 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.178316 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.178335 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.178347 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:22Z","lastTransitionTime":"2025-11-22T10:39:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.183798 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.195229 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f4
2cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure 
cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.206939 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.219513 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.232750 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd6322b1-cd8f-48d9-8564-d186093cf4b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2666bf2a66ad2ce74076f80bb6c6f2f84bd0f8bdc182383bec5939a141c238b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:22Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.280947 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.280989 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.281005 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.281026 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.281045 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:22Z","lastTransitionTime":"2025-11-22T10:39:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.384029 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.384092 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.384110 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.384138 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.384157 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:22Z","lastTransitionTime":"2025-11-22T10:39:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.447328 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:22 crc kubenswrapper[4938]: E1122 10:39:22.447496 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.486680 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.486734 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.486748 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.486768 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.486781 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:22Z","lastTransitionTime":"2025-11-22T10:39:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.589351 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.589392 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.589403 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.589419 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.589429 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:22Z","lastTransitionTime":"2025-11-22T10:39:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.691788 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.691864 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.691888 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.691943 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.691968 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:22Z","lastTransitionTime":"2025-11-22T10:39:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.796001 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.796255 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.796269 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.796283 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.796294 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:22Z","lastTransitionTime":"2025-11-22T10:39:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.898890 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.898984 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.899007 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.899037 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.899058 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:22Z","lastTransitionTime":"2025-11-22T10:39:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:22 crc kubenswrapper[4938]: I1122 10:39:22.999855 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/3.log" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.002307 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.002378 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.002401 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.002428 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.002445 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:23Z","lastTransitionTime":"2025-11-22T10:39:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.004586 4938 scope.go:117] "RemoveContainer" containerID="f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c" Nov 22 10:39:23 crc kubenswrapper[4938]: E1122 10:39:23.004890 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.027651 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f16813abbb9f6017d793038d770277dc3735d9e7
503488bb7a9fc23c4de7f68c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:21Z\\\",\\\"message\\\":\\\"5-08-24T17:21:41Z\\\\nI1122 10:39:21.527318 7057 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1122 10:39:21.526775 7057 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/community-operators\\\\\\\"}\\\\nI1122 10:39:21.527455 7057 services_controller.go:360] Finished syncing service community-operators on namespace openshift-marketplace for network=default : 1.89203ms\\\\nI1122 10:39:21.527466 7057 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1122 10:39:21.527468 7057 services_controller.go:356] Processing sync for service openshift-machine-config-operator/machine-config-daemon for network=default\\\\nF1122 10:39:21.527517 7057 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed call\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.045730 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.062405 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 
10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.082271 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.094326 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.104848 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.105183 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.105231 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.105246 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.105267 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.105285 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:23Z","lastTransitionTime":"2025-11-22T10:39:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.116077 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.131434 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.145082 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"2025-11-22T10:38:20+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7\\\\n2025-11-22T10:38:20+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7 to /host/opt/cni/bin/\\\\n2025-11-22T10:38:22Z [verbose] multus-daemon started\\\\n2025-11-22T10:38:22Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:39:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.154676 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.163798 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.172696 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.183464 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operato
r@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.196493 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.207419 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.207477 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.207493 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.207517 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.207533 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:23Z","lastTransitionTime":"2025-11-22T10:39:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.211828 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.223299 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd6322b1-cd8f-48d9-8564-d186093cf4b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2666bf2a66ad2ce74076f80bb6c6f2f84bd0f8bdc182383bec5939a141c238b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.236111 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.248578 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:23Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.310829 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.311166 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.311324 4938 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.311476 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.311772 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:23Z","lastTransitionTime":"2025-11-22T10:39:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.415120 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.415437 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.415555 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.415662 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.415760 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:23Z","lastTransitionTime":"2025-11-22T10:39:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.446903 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.446988 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.446988 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:23 crc kubenswrapper[4938]: E1122 10:39:23.447274 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:23 crc kubenswrapper[4938]: E1122 10:39:23.447402 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:23 crc kubenswrapper[4938]: E1122 10:39:23.447482 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.518614 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.518674 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.518686 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.518703 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.518713 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:23Z","lastTransitionTime":"2025-11-22T10:39:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.621160 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.621197 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.621205 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.621222 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.621277 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:23Z","lastTransitionTime":"2025-11-22T10:39:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.724093 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.724160 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.724182 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.724214 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.724236 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:23Z","lastTransitionTime":"2025-11-22T10:39:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.831713 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.831790 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.831814 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.831847 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.831872 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:23Z","lastTransitionTime":"2025-11-22T10:39:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.934852 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.934962 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.934987 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.935016 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:23 crc kubenswrapper[4938]: I1122 10:39:23.935037 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:23Z","lastTransitionTime":"2025-11-22T10:39:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.036808 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.036881 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.036908 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.036984 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.037007 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:24Z","lastTransitionTime":"2025-11-22T10:39:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.139979 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.140046 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.140072 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.140103 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.140128 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:24Z","lastTransitionTime":"2025-11-22T10:39:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.243440 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.243488 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.243503 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.243523 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.243535 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:24Z","lastTransitionTime":"2025-11-22T10:39:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.345691 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.345760 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.345784 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.345814 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.345837 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:24Z","lastTransitionTime":"2025-11-22T10:39:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.446462 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:24 crc kubenswrapper[4938]: E1122 10:39:24.446646 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.448091 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.448154 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.448172 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.448198 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.448216 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:24Z","lastTransitionTime":"2025-11-22T10:39:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.551443 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.551842 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.552299 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.552508 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.552750 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:24Z","lastTransitionTime":"2025-11-22T10:39:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.656071 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.656498 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.656711 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.656899 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.657129 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:24Z","lastTransitionTime":"2025-11-22T10:39:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.760347 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.760576 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.760669 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.760732 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.760796 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:24Z","lastTransitionTime":"2025-11-22T10:39:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.863353 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.863389 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.863399 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.863415 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.863428 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:24Z","lastTransitionTime":"2025-11-22T10:39:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.966288 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.966358 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.966376 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.966404 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:24 crc kubenswrapper[4938]: I1122 10:39:24.966426 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:24Z","lastTransitionTime":"2025-11-22T10:39:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.069396 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.069461 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.069474 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.069494 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.069508 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:25Z","lastTransitionTime":"2025-11-22T10:39:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.172940 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.172985 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.172997 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.173013 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.173025 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:25Z","lastTransitionTime":"2025-11-22T10:39:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.275645 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.276238 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.276317 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.276389 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.276484 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:25Z","lastTransitionTime":"2025-11-22T10:39:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.379183 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.379238 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.379254 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.379273 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.379291 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:25Z","lastTransitionTime":"2025-11-22T10:39:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.447377 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.447391 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:25 crc kubenswrapper[4938]: E1122 10:39:25.447565 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:25 crc kubenswrapper[4938]: E1122 10:39:25.447829 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.447976 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:25 crc kubenswrapper[4938]: E1122 10:39:25.448148 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.481993 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.482073 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.482093 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.482123 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.482189 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:25Z","lastTransitionTime":"2025-11-22T10:39:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.585866 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.585949 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.585967 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.585991 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.586014 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:25Z","lastTransitionTime":"2025-11-22T10:39:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.690158 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.690345 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.690368 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.690431 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.690462 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:25Z","lastTransitionTime":"2025-11-22T10:39:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.793770 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.793827 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.793840 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.793861 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.793875 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:25Z","lastTransitionTime":"2025-11-22T10:39:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.897027 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.897068 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.897079 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.897108 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.897122 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:25Z","lastTransitionTime":"2025-11-22T10:39:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.999820 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.999862 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.999874 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.999891 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:25 crc kubenswrapper[4938]: I1122 10:39:25.999902 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:25Z","lastTransitionTime":"2025-11-22T10:39:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.103983 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.104051 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.104077 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.104108 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.104132 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:26Z","lastTransitionTime":"2025-11-22T10:39:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.209078 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.209141 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.209151 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.209174 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.209216 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:26Z","lastTransitionTime":"2025-11-22T10:39:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.312765 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.312846 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.312866 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.312899 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.312956 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:26Z","lastTransitionTime":"2025-11-22T10:39:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.422876 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.423234 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.423436 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.423601 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.423723 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:26Z","lastTransitionTime":"2025-11-22T10:39:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.463245 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:26 crc kubenswrapper[4938]: E1122 10:39:26.463614 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.527337 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.527629 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.527753 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.527894 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.528075 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:26Z","lastTransitionTime":"2025-11-22T10:39:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.631522 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.631814 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.632091 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.632260 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.632392 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:26Z","lastTransitionTime":"2025-11-22T10:39:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.736425 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.736820 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.736987 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.737135 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.737302 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:26Z","lastTransitionTime":"2025-11-22T10:39:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.840715 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.840814 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.840837 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.840875 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.840902 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:26Z","lastTransitionTime":"2025-11-22T10:39:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.944209 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.944282 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.944305 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.944335 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:26 crc kubenswrapper[4938]: I1122 10:39:26.944357 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:26Z","lastTransitionTime":"2025-11-22T10:39:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.047154 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.047477 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.047494 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.047518 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.047536 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:27Z","lastTransitionTime":"2025-11-22T10:39:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.150955 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.151072 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.151096 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.151140 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.151174 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:27Z","lastTransitionTime":"2025-11-22T10:39:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.254137 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.254194 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.254208 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.254232 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.254260 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:27Z","lastTransitionTime":"2025-11-22T10:39:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.358269 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.358353 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.358378 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.358409 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.358428 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:27Z","lastTransitionTime":"2025-11-22T10:39:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.447375 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:27 crc kubenswrapper[4938]: E1122 10:39:27.447577 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.447895 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:27 crc kubenswrapper[4938]: E1122 10:39:27.448056 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.448208 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:27 crc kubenswrapper[4938]: E1122 10:39:27.448458 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.462435 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.462503 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.462524 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.462550 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.462571 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:27Z","lastTransitionTime":"2025-11-22T10:39:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.566255 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.566340 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.566362 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.566399 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.566426 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:27Z","lastTransitionTime":"2025-11-22T10:39:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.669992 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.670061 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.670073 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.670094 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.670108 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:27Z","lastTransitionTime":"2025-11-22T10:39:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.773458 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.773512 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.773524 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.773544 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.773557 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:27Z","lastTransitionTime":"2025-11-22T10:39:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.876331 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.876392 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.876414 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.876442 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.876461 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:27Z","lastTransitionTime":"2025-11-22T10:39:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.979045 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.979110 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.979126 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.979150 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:27 crc kubenswrapper[4938]: I1122 10:39:27.979166 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:27Z","lastTransitionTime":"2025-11-22T10:39:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.081889 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.082005 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.082030 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.082064 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.082087 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:28Z","lastTransitionTime":"2025-11-22T10:39:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.185076 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.185150 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.185175 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.185206 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.185238 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:28Z","lastTransitionTime":"2025-11-22T10:39:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.288246 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.288311 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.288332 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.288356 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.288374 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:28Z","lastTransitionTime":"2025-11-22T10:39:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.390506 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.390628 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.390649 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.390675 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.390693 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:28Z","lastTransitionTime":"2025-11-22T10:39:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.446798 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:28 crc kubenswrapper[4938]: E1122 10:39:28.447062 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.469534 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"2025-11-22T10:38:20+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7\\\\n2025-11-22T10:38:20+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7 to /host/opt/cni/bin/\\\\n2025-11-22T10:38:22Z [verbose] multus-daemon started\\\\n2025-11-22T10:38:22Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:39:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.484120 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.494091 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.494150 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.494974 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.495025 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.495045 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:28Z","lastTransitionTime":"2025-11-22T10:39:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.500786 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.517579 4938 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"c
ontainerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.543241 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117
ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.562633 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.579025 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.599317 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.599380 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.599393 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.599411 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.599442 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:28Z","lastTransitionTime":"2025-11-22T10:39:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.602027 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.615810 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd6322b1-cd8f-48d9-8564-d186093cf4b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2666bf2a66ad2ce74076f80bb6c6f2f84bd0f8bdc182383bec5939a141c238b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.635777 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.652001 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.666864 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.683708 4938 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.701791 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.701831 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.701844 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.701863 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.701875 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:28Z","lastTransitionTime":"2025-11-22T10:39:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.705321 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220
d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.721661 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.740777 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.754430 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.779043 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f16813abbb9f6017d793038d770277dc3735d9e7
503488bb7a9fc23c4de7f68c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:21Z\\\",\\\"message\\\":\\\"5-08-24T17:21:41Z\\\\nI1122 10:39:21.527318 7057 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1122 10:39:21.526775 7057 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/community-operators\\\\\\\"}\\\\nI1122 10:39:21.527455 7057 services_controller.go:360] Finished syncing service community-operators on namespace openshift-marketplace for network=default : 1.89203ms\\\\nI1122 10:39:21.527466 7057 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1122 10:39:21.527468 7057 services_controller.go:356] Processing sync for service openshift-machine-config-operator/machine-config-daemon for network=default\\\\nF1122 10:39:21.527517 7057 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed call\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:28Z is after 2025-08-24T17:21:41Z"
Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.804436 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.804484 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.804501 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.804525 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.804543 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:28Z","lastTransitionTime":"2025-11-22T10:39:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.907591 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.907633 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.907645 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.907666 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:28 crc kubenswrapper[4938]: I1122 10:39:28.907678 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:28Z","lastTransitionTime":"2025-11-22T10:39:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.010109 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.010179 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.010205 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.010237 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.010260 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:29Z","lastTransitionTime":"2025-11-22T10:39:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.113482 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.113551 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.113573 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.113605 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.113628 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:29Z","lastTransitionTime":"2025-11-22T10:39:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.216420 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.216475 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.216490 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.216512 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.216526 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:29Z","lastTransitionTime":"2025-11-22T10:39:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.320362 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.320427 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.320444 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.320472 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.320494 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:29Z","lastTransitionTime":"2025-11-22T10:39:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.423288 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.423331 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.423342 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.423360 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.423373 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:29Z","lastTransitionTime":"2025-11-22T10:39:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.447078 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.447078 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.447217 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:39:29 crc kubenswrapper[4938]: E1122 10:39:29.447413 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c"
Nov 22 10:39:29 crc kubenswrapper[4938]: E1122 10:39:29.447545 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:39:29 crc kubenswrapper[4938]: E1122 10:39:29.447671 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.526093 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.526135 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.526147 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.526166 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.526180 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:29Z","lastTransitionTime":"2025-11-22T10:39:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.630147 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.630240 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.630259 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.630290 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.630312 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:29Z","lastTransitionTime":"2025-11-22T10:39:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.734498 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.734591 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.734680 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.734719 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.734744 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:29Z","lastTransitionTime":"2025-11-22T10:39:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.839034 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.839116 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.839138 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.839172 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.839194 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:29Z","lastTransitionTime":"2025-11-22T10:39:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.943218 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.943282 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.943300 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.943328 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.943347 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:29Z","lastTransitionTime":"2025-11-22T10:39:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:29 crc kubenswrapper[4938]: I1122 10:39:29.945750 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f"
Nov 22 10:39:29 crc kubenswrapper[4938]: E1122 10:39:29.946090 4938 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 10:39:29 crc kubenswrapper[4938]: E1122 10:39:29.946265 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs podName:7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c nodeName:}" failed. No retries permitted until 2025-11-22 10:40:33.946223996 +0000 UTC m=+166.414061585 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs") pod "network-metrics-daemon-s7w5f" (UID: "7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.015622 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.015705 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.015731 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.015760 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.015779 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:30 crc kubenswrapper[4938]: E1122 10:39:30.048009 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:30Z is after 
2025-08-24T17:21:41Z" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.054322 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.054383 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.054401 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.054428 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.054449 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:30 crc kubenswrapper[4938]: E1122 10:39:30.078061 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:30Z is after 
2025-08-24T17:21:41Z" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.085017 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.085064 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.085079 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.085100 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.085117 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:30 crc kubenswrapper[4938]: E1122 10:39:30.103198 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:30Z is after 
2025-08-24T17:21:41Z" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.109026 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.109114 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.109124 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.109155 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.109165 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:30 crc kubenswrapper[4938]: E1122 10:39:30.125052 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:30Z is after 
2025-08-24T17:21:41Z" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.129018 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.129043 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.129053 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.129069 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.129080 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:30 crc kubenswrapper[4938]: E1122 10:39:30.141968 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:30Z is after 
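All of the retries above fail the same way: the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a serving certificate whose validity window ended at 2025-08-24T17:21:41Z, while the node's clock reads 2025-11-22T10:39:30Z, so the Go TLS client rejects the handshake before the status patch is ever delivered; this is consistent with a cluster image whose certificates expired while it was offline. The sketch below reproduces the validity-window comparison behind "certificate has expired or is not yet valid"; the serving-cert.pem path is a hypothetical stand-in, and crypto/x509 applies the equivalent check internally when verifying a peer certificate.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	// Hypothetical path; substitute the webhook's actual serving certificate.
	pemBytes, err := os.ReadFile("serving-cert.pem")
	if err != nil {
		log.Fatalf("read certificate: %v", err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil || block.Type != "CERTIFICATE" {
		log.Fatal("no CERTIFICATE block found in PEM input")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatalf("parse certificate: %v", err)
	}
	now := time.Now().UTC()
	switch {
	case now.After(cert.NotAfter):
		// The failing case in the log: the TLS handshake is rejected
		// before any HTTP request reaches the webhook.
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	default:
		fmt.Printf("valid until %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
	}
}
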
2025-08-24T17:21:41Z" Nov 22 10:39:30 crc kubenswrapper[4938]: E1122 10:39:30.142122 4938 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.143678 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.143828 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.144082 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.144224 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.144348 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.248390 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.248455 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.248473 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.248500 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.248524 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.351708 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.351777 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.351796 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.351849 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.351870 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.447124 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:30 crc kubenswrapper[4938]: E1122 10:39:30.447334 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.454876 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.454998 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.455024 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.455058 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.455086 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.558282 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.558362 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.558378 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.558449 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.558469 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.661999 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.662105 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.662135 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.662169 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.662190 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.765383 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.766001 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.766020 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.766037 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.766048 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.869736 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.869784 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.869815 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.869840 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.869852 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.972764 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.972805 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.972818 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.972834 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:30 crc kubenswrapper[4938]: I1122 10:39:30.972846 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:30Z","lastTransitionTime":"2025-11-22T10:39:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.075531 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.075572 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.075584 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.075602 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.075615 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:31Z","lastTransitionTime":"2025-11-22T10:39:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.179323 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.179381 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.179398 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.179419 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.179436 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:31Z","lastTransitionTime":"2025-11-22T10:39:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.282841 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.282959 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.282982 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.283008 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.283026 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:31Z","lastTransitionTime":"2025-11-22T10:39:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.386501 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.386581 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.386602 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.386630 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.386649 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:31Z","lastTransitionTime":"2025-11-22T10:39:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.446495 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.446497 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.446805 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:31 crc kubenswrapper[4938]: E1122 10:39:31.446973 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:31 crc kubenswrapper[4938]: E1122 10:39:31.447053 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:31 crc kubenswrapper[4938]: E1122 10:39:31.447210 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.469765 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.489841 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.489999 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.490018 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.490077 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.490098 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:31Z","lastTransitionTime":"2025-11-22T10:39:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.592873 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.592963 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.592986 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.593019 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.593041 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:31Z","lastTransitionTime":"2025-11-22T10:39:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.696089 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.696152 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.696171 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.696200 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.696219 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:31Z","lastTransitionTime":"2025-11-22T10:39:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.799817 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.799879 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.799896 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.799952 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.799971 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:31Z","lastTransitionTime":"2025-11-22T10:39:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.903636 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.903711 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.903736 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.903773 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:31 crc kubenswrapper[4938]: I1122 10:39:31.903799 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:31Z","lastTransitionTime":"2025-11-22T10:39:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.006852 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.006955 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.006973 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.006998 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.007014 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:32Z","lastTransitionTime":"2025-11-22T10:39:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.110048 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.110093 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.110106 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.110128 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.110140 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:32Z","lastTransitionTime":"2025-11-22T10:39:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.212975 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.213036 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.213058 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.213088 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.213109 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:32Z","lastTransitionTime":"2025-11-22T10:39:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.316198 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.316308 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.316335 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.316366 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.316388 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:32Z","lastTransitionTime":"2025-11-22T10:39:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.419067 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.419184 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.419204 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.419224 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.419238 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:32Z","lastTransitionTime":"2025-11-22T10:39:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.448276 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:32 crc kubenswrapper[4938]: E1122 10:39:32.448716 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.522481 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.522759 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.522854 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.522973 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.523080 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:32Z","lastTransitionTime":"2025-11-22T10:39:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.625349 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.625623 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.625692 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.625757 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.625816 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:32Z","lastTransitionTime":"2025-11-22T10:39:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.729330 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.729614 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.729772 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.729958 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.730158 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:32Z","lastTransitionTime":"2025-11-22T10:39:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.833078 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.833128 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.833147 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.833169 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.833208 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:32Z","lastTransitionTime":"2025-11-22T10:39:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.936308 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.936397 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.936426 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.936461 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:32 crc kubenswrapper[4938]: I1122 10:39:32.936487 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:32Z","lastTransitionTime":"2025-11-22T10:39:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.038956 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.039018 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.039036 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.039058 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.039079 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:33Z","lastTransitionTime":"2025-11-22T10:39:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.142585 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.142648 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.142666 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.142696 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.142713 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:33Z","lastTransitionTime":"2025-11-22T10:39:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.245410 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.245477 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.245499 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.245528 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.245549 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:33Z","lastTransitionTime":"2025-11-22T10:39:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.348038 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.348102 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.348124 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.348150 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.348168 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:33Z","lastTransitionTime":"2025-11-22T10:39:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.447338 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.447371 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:33 crc kubenswrapper[4938]: E1122 10:39:33.447498 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.447568 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:33 crc kubenswrapper[4938]: E1122 10:39:33.447670 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:33 crc kubenswrapper[4938]: E1122 10:39:33.447896 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.451947 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.452006 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.452024 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.452047 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.452067 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:33Z","lastTransitionTime":"2025-11-22T10:39:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.554844 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.554946 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.554965 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.554990 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.555009 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:33Z","lastTransitionTime":"2025-11-22T10:39:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.657825 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.657870 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.657883 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.657898 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.657922 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:33Z","lastTransitionTime":"2025-11-22T10:39:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.760190 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.760238 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.760247 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.760262 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.760272 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:33Z","lastTransitionTime":"2025-11-22T10:39:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.863139 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.863174 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.863184 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.863196 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.863205 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:33Z","lastTransitionTime":"2025-11-22T10:39:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.965645 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.966301 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.966329 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.966346 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:33 crc kubenswrapper[4938]: I1122 10:39:33.966360 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:33Z","lastTransitionTime":"2025-11-22T10:39:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.068678 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.068739 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.068759 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.068785 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.068804 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:34Z","lastTransitionTime":"2025-11-22T10:39:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.172582 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.173052 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.173252 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.173419 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.173577 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:34Z","lastTransitionTime":"2025-11-22T10:39:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.277108 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.277417 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.277580 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.277706 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.277834 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:34Z","lastTransitionTime":"2025-11-22T10:39:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.380858 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.380960 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.380982 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.381009 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.381027 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:34Z","lastTransitionTime":"2025-11-22T10:39:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.448175 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:34 crc kubenswrapper[4938]: E1122 10:39:34.448548 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.457332 4938 scope.go:117] "RemoveContainer" containerID="f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c" Nov 22 10:39:34 crc kubenswrapper[4938]: E1122 10:39:34.458057 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.483698 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.483975 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.484156 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.484376 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.484562 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:34Z","lastTransitionTime":"2025-11-22T10:39:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.588255 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.588331 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.588356 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.588385 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.588408 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:34Z","lastTransitionTime":"2025-11-22T10:39:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.692419 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.692480 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.692498 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.692525 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.692541 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:34Z","lastTransitionTime":"2025-11-22T10:39:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.796249 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.796596 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.796821 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.797060 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.797243 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:34Z","lastTransitionTime":"2025-11-22T10:39:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.900279 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.900325 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.900341 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.900363 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:34 crc kubenswrapper[4938]: I1122 10:39:34.900375 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:34Z","lastTransitionTime":"2025-11-22T10:39:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.003533 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.004075 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.004277 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.004425 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.004572 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:35Z","lastTransitionTime":"2025-11-22T10:39:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.108337 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.108397 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.108413 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.108437 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.108458 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:35Z","lastTransitionTime":"2025-11-22T10:39:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.210792 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.210842 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.210854 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.210870 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.210881 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:35Z","lastTransitionTime":"2025-11-22T10:39:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.312891 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.312965 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.312977 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.312996 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.313010 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:35Z","lastTransitionTime":"2025-11-22T10:39:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.416101 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.416142 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.416150 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.416164 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.416173 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:35Z","lastTransitionTime":"2025-11-22T10:39:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.446800 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.446887 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:35 crc kubenswrapper[4938]: E1122 10:39:35.446983 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.446812 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:35 crc kubenswrapper[4938]: E1122 10:39:35.447105 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:35 crc kubenswrapper[4938]: E1122 10:39:35.447361 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.522589 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.522638 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.522653 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.522669 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.522683 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:35Z","lastTransitionTime":"2025-11-22T10:39:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.625307 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.625363 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.625379 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.625397 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.625410 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:35Z","lastTransitionTime":"2025-11-22T10:39:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.728287 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.728576 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.728666 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.728755 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.728842 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:35Z","lastTransitionTime":"2025-11-22T10:39:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.831615 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.831896 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.832001 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.832103 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.832189 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:35Z","lastTransitionTime":"2025-11-22T10:39:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.935409 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.935454 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.935465 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.935486 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:35 crc kubenswrapper[4938]: I1122 10:39:35.935498 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:35Z","lastTransitionTime":"2025-11-22T10:39:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.038417 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.038463 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.038475 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.038491 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.038504 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:36Z","lastTransitionTime":"2025-11-22T10:39:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.141233 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.141266 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.141276 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.141289 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.141300 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:36Z","lastTransitionTime":"2025-11-22T10:39:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.243836 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.243904 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.243942 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.243961 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.243974 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:36Z","lastTransitionTime":"2025-11-22T10:39:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.346752 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.346793 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.346804 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.346819 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.346829 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:36Z","lastTransitionTime":"2025-11-22T10:39:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.447064 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:36 crc kubenswrapper[4938]: E1122 10:39:36.447227 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.449110 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.449159 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.449175 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.449198 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.449214 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:36Z","lastTransitionTime":"2025-11-22T10:39:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.551625 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.552952 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.553211 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.553445 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.553662 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:36Z","lastTransitionTime":"2025-11-22T10:39:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.656056 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.656141 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.656156 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.656175 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.656189 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:36Z","lastTransitionTime":"2025-11-22T10:39:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.759006 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.759059 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.759068 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.759084 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.759095 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:36Z","lastTransitionTime":"2025-11-22T10:39:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.860969 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.861016 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.861028 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.861045 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.861055 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:36Z","lastTransitionTime":"2025-11-22T10:39:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.963388 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.963438 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.963452 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.963470 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:36 crc kubenswrapper[4938]: I1122 10:39:36.963484 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:36Z","lastTransitionTime":"2025-11-22T10:39:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.065582 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.065652 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.065669 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.065685 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.065697 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:37Z","lastTransitionTime":"2025-11-22T10:39:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.168703 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.168757 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.168774 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.168795 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.168812 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:37Z","lastTransitionTime":"2025-11-22T10:39:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.271260 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.271299 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.271312 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.271328 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.271339 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:37Z","lastTransitionTime":"2025-11-22T10:39:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.373457 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.373499 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.373508 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.373521 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.373531 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:37Z","lastTransitionTime":"2025-11-22T10:39:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.446528 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.446580 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.446528 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:37 crc kubenswrapper[4938]: E1122 10:39:37.446656 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:37 crc kubenswrapper[4938]: E1122 10:39:37.446735 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:37 crc kubenswrapper[4938]: E1122 10:39:37.446882 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.475335 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.475370 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.475378 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.475390 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.475400 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:37Z","lastTransitionTime":"2025-11-22T10:39:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.577585 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.578115 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.578213 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.578285 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.578348 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:37Z","lastTransitionTime":"2025-11-22T10:39:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.680146 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.680185 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.680195 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.680211 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.680224 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:37Z","lastTransitionTime":"2025-11-22T10:39:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.782542 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.782579 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.782591 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.782603 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.782613 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:37Z","lastTransitionTime":"2025-11-22T10:39:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.884483 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.884512 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.884520 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.884532 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.884540 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:37Z","lastTransitionTime":"2025-11-22T10:39:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.986554 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.986597 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.986608 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.986624 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:37 crc kubenswrapper[4938]: I1122 10:39:37.986671 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:37Z","lastTransitionTime":"2025-11-22T10:39:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.089921 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.089971 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.089984 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.090001 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.090012 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:38Z","lastTransitionTime":"2025-11-22T10:39:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.192873 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.193014 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.193034 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.193057 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.193074 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:38Z","lastTransitionTime":"2025-11-22T10:39:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.295552 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.295592 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.295604 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.295620 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.295631 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:38Z","lastTransitionTime":"2025-11-22T10:39:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.397303 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.397356 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.397381 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.397410 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.397434 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:38Z","lastTransitionTime":"2025-11-22T10:39:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.446732 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:38 crc kubenswrapper[4938]: E1122 10:39:38.446906 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.459114 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b7c4f2f4ac8f16d43dc2c66e5977beb5961fdba335ef7e272a9a72edb1fa79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.470094 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38b6f8db110295b8dccd12010bde84181bb83455733c2202d67098c085de697f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f05af6e05a6a77ad59b86f011b213315683d01adb8d0c86340dd3dc4300e451\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.483061 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fcb9956e-5cbe-41c3-8b90-08bbb0876319\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8b95ac563d192fa51cf145c4ae1ed8702789397a62b4a25cb7a7763fab3d47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b46eade678be978437c0a79ce2fa5b96351b53577b3f8235d18f6124cf69116\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a8b166d409522f6eacbf02e146641c55e1473ae57a27a6cb21eed233c4d502\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac44b8426b90f7bcf5e75eaa7db2d56a11f81129b2ec023f4bb0ff18b2590fea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.494141 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.500827 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.500891 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.500947 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.500980 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.501003 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:38Z","lastTransitionTime":"2025-11-22T10:39:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.506233 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.515274 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-xdnvn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"13ff9c31-ec9f-417b-8237-65660901d3ad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4340bb7aa579eb10167fdf0e4be53948c16e36e6feeee9ed116a4648d8f57d53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kpp5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-xdnvn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.532796 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8b4b8200-248f-47ae-bed3-cbfd4598b99d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:21Z\\\",\\\"message\\\":\\\"5-08-24T17:21:41Z\\\\nI1122 10:39:21.527318 7057 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1122 10:39:21.526775 7057 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/community-operators\\\\\\\"}\\\\nI1122 10:39:21.527455 7057 services_controller.go:360] Finished syncing service community-operators on namespace openshift-marketplace for network=default : 1.89203ms\\\\nI1122 10:39:21.527466 7057 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1122 10:39:21.527468 7057 services_controller.go:356] Processing sync for service openshift-machine-config-operator/machine-config-daemon for network=default\\\\nF1122 10:39:21.527517 7057 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed call\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kz2bx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-8sphc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.543132 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e2b98cee-eb10-409f-93b6-153856457611\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7481d70258a74eccb9b019a6ad53755d20a8aa9b00e93fb7ac8053d253a2f2a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mnjbh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-slzgc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.554483 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db6a2428-0ce2-4754-8876-b95a9470a769\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18aedae60b495c4536790ffe9413655748b1220d716143b69d3bce1953dd8837\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0c60eba2889922de199a6b667961ccfb4d7ceb8c44533ac29eea90b51610d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r75cw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ngpcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 
10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.566032 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c75cd5ce-6a41-4b7e-a4aa-909a83beff6a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a55209f4ad0cb94008b76e4336227459f720d4cb476afe1126ff883391b9452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f964f1561ad054a46ef1c6c164f154d10b7f3bed4720f9aef661221ce597630\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://51d7c1cb4ed3203ccdb587437e7ce0b47ca4b7ac3c14b0e6da48f9d022fd93d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d2ed0191dd7384bffdba0933ca7ca3c8021fba8cbc5466f6a60bf7a261425bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.577246 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3a4a31d2-b239-442b-a248-289d76aecbcb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07938ae054990b95bf199723d4ba10219ef04f507543e4fd3890a99182b6e2e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a620bf7f1f9c8776bb447038e481156ce628725a1d81c06dd07f42cc365dc02e\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://626f9560c7f12b7511e87ff7e748dd5594c270b496433fcee6d00142454c17ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5da0ea350ccb8e550e0e807d3a331f996aa87b8580586827c129198235dd7cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d24569db7b7dd30f833b0588464ec6055545ac7673e351f64319e3b98c456683\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"iserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 10:38:05.499854 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:38:05.500561 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3741504069/tls.crt::/tmp/serving-cert-3741504069/tls.key\\\\\\\"\\\\nI1122 10:38:11.061543 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:38:11.080057 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:38:11.080085 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:38:11.080107 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:38:11.080115 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:38:11.116549 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:38:11.116587 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116592 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:38:11.116596 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' 
detected.\\\\nW1122 10:38:11.116599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:38:11.116602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:38:11.116605 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1122 10:38:11.116823 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1122 10:38:11.134161 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:38:11.134254 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c79dfafe25ab7c0e7dde43fed04e3148db88b52d7a93f0a27d1fb3a456d500ce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e06a0117877d55235fb61dc6081c5154a16b084fb0fc6fd4c93ed5252f5686a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.591137 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.602391 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10b463fd797546760d88c5055b3e96b5b44464c5f686119e43246277846c9781\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.602964 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.603001 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.603011 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.603026 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.603036 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:38Z","lastTransitionTime":"2025-11-22T10:39:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.615462 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-6kr67" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a430bfdd-4d1d-4bda-82ec-884f775af556\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd14077186af30c27867f9ac8f5ffe712c9d717beeae04d1aefc51f9db909ee3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b82c9f2b1430bc28ff9a1f219eeada12c787a3fe717c929f0fcf678cc1999b2a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f5cd5c6f7ef184bd8b55ff0fefd9a84fced3a9b99c0948e4a3c0abab924380a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9d00163b5a8b76779d2c13eb53e617a1f515d37c6bb23504b44e38686f6a35c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba4f33912dee3ed0244d01d5c906563af46d9939a1c932a9c969711708ffadcc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://584c5d1ccd60528d1df31e79e5c31b591761aa7e26e340ea7f68c8ca33cfa012\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d218bd7b790533e13bac1c5ea50cc0669fbf738d279a756f0e49ed7d2857c8ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:38:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sgw8t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-6kr67\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.628789 4938 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-j67hq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"671da3f6-347d-4f86-890d-155ef844b1f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:39:07Z\\\",\\\"message\\\":\\\"2025-11-22T10:38:20+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7\\\\n2025-11-22T10:38:20+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_be3ca8db-e8a8-471a-b4fa-30b2c33a34a7 to /host/opt/cni/bin/\\\\n2025-11-22T10:38:22Z [verbose] multus-daemon started\\\\n2025-11-22T10:38:22Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:39:07Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:38:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxdk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j67hq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.637969 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8l8nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3e57739-79cd-498f-8e4b-8423b0fb5306\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a2e5cd8fa5a0cedfd7c139a83036d211bfbd6c99c8f5c4bf38bb5ec2190a75b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:38:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9j4sd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8l8nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.649124 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m5qtx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:38:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s7w5f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.659431 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd6322b1-cd8f-48d9-8564-d186093cf4b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2666bf2a66ad2ce74076f80bb6c6f2f84bd0f8bdc182383bec5939a141c238b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0045ec4812268f0c075aac4b1a1550742767f186e267d6d7f7bfd5627caddd64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.678233 4938 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1a0c1be2-5e36-4811-810d-eff8e4797bef\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:38:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:37:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08cb862961be7704390c7914e6b1b07ec5acfc0a8684b219628b3374e0503cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8d0c1e1dce4c3b36595c21c681c2bf72797b4c64154e28eb9c5764c01e3e2b25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b66c1cb8d341db26bfd1f5b341172bb1f349df3f2876644ab78c31c9b4ce36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3d43c7f41bef25e5f9e54cc4bad63528c65b3d
8fbc973019dd23cd8ca518af4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bea9ce72194c381c26a26ad8785fcbf3814d9ef4550e0a10fa9038e22fa1ed0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:37:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0248a3ffa73fa20db2fc5ddf0324410eacc266c322e55227dbcf8341c42cc559\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0248a3ffa73fa20db2fc5ddf0324410eacc266c322e55227dbcf8341c42cc559\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6a44c1742b72de50ae3e79ad9bea06bd2c7b943229916b9b8df8d8115dff0f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6a44c1742b72de50ae3e79ad9bea06bd2c7b943229916b9b8df8d8115dff0f7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:50Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://707a8f855bd1dfa346be2997b137323cc07ec7bb2565c5b61de2b2e2197948a5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://707a8f855bd1dfa346be2997b137323cc07ec7bb2565c5b61de2b2e2197948a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:37:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:37:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:37:48Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:38Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.710089 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.710161 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.710172 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.710188 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.710199 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:38Z","lastTransitionTime":"2025-11-22T10:39:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.813308 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.813350 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.813362 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.813378 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.813404 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:38Z","lastTransitionTime":"2025-11-22T10:39:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.915505 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.915556 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.915566 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.915579 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:38 crc kubenswrapper[4938]: I1122 10:39:38.915587 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:38Z","lastTransitionTime":"2025-11-22T10:39:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.018588 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.018655 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.018670 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.018691 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.018708 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:39Z","lastTransitionTime":"2025-11-22T10:39:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.120642 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.120675 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.120703 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.120716 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.120725 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:39Z","lastTransitionTime":"2025-11-22T10:39:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.223246 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.223289 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.223300 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.223316 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.223326 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:39Z","lastTransitionTime":"2025-11-22T10:39:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.325366 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.325414 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.325428 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.325446 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.325457 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:39Z","lastTransitionTime":"2025-11-22T10:39:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.427722 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.427795 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.427814 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.428306 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.428364 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:39Z","lastTransitionTime":"2025-11-22T10:39:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.447311 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.447394 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.447333 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:39 crc kubenswrapper[4938]: E1122 10:39:39.447464 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:39 crc kubenswrapper[4938]: E1122 10:39:39.447651 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:39 crc kubenswrapper[4938]: E1122 10:39:39.447772 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.531241 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.531291 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.531300 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.531314 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.531324 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:39Z","lastTransitionTime":"2025-11-22T10:39:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.634009 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.634038 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.634046 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.634062 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.634071 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:39Z","lastTransitionTime":"2025-11-22T10:39:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.735592 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.735624 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.735644 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.735656 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.735664 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:39Z","lastTransitionTime":"2025-11-22T10:39:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.838276 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.838310 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.838320 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.838334 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.838345 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:39Z","lastTransitionTime":"2025-11-22T10:39:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.941246 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.941290 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.941312 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.941331 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:39 crc kubenswrapper[4938]: I1122 10:39:39.941343 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:39Z","lastTransitionTime":"2025-11-22T10:39:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.043271 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.043311 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.043323 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.043338 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.043352 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.145080 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.145132 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.145144 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.145162 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.145176 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.247245 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.247285 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.247296 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.247312 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.247357 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.251449 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.251482 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.251495 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.251510 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.251522 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: E1122 10:39:40.262444 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.265818 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.265859 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.265868 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.265882 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.265892 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: E1122 10:39:40.277043 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.280057 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.280120 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.280133 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.280152 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.280187 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: E1122 10:39:40.291036 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.294215 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.294305 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.294376 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.294459 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.294528 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: E1122 10:39:40.305817 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.312613 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.312653 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.312663 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.312679 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.312691 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: E1122 10:39:40.328703 4938 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"cb39cae8-7171-4fb1-ac14-5b3907852f98\\\",\\\"systemUUID\\\":\\\"3dc249c0-2c41-46c5-a9ec-f9214ae2ea91\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:39:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:39:40 crc kubenswrapper[4938]: E1122 10:39:40.328876 4938 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.349127 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.349157 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.349165 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.349178 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.349187 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.447157 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:40 crc kubenswrapper[4938]: E1122 10:39:40.447303 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.450785 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.450808 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.450818 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.450829 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.450839 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.553241 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.553281 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.553290 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.553305 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.553316 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.655504 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.655537 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.655545 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.655559 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.655568 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.757340 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.757374 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.757383 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.757399 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.757416 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.859599 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.859637 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.859646 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.859659 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.859667 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.961760 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.961796 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.961828 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.961843 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:40 crc kubenswrapper[4938]: I1122 10:39:40.961853 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:40Z","lastTransitionTime":"2025-11-22T10:39:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.063230 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.063282 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.063297 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.063317 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.063328 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:41Z","lastTransitionTime":"2025-11-22T10:39:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.165338 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.165399 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.165434 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.165461 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.165484 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:41Z","lastTransitionTime":"2025-11-22T10:39:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.267693 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.267724 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.267732 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.267743 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.267754 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:41Z","lastTransitionTime":"2025-11-22T10:39:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.369772 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.369801 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.369810 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.369822 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.369831 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:41Z","lastTransitionTime":"2025-11-22T10:39:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.446666 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.446732 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:41 crc kubenswrapper[4938]: E1122 10:39:41.446857 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.446667 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:41 crc kubenswrapper[4938]: E1122 10:39:41.446998 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:41 crc kubenswrapper[4938]: E1122 10:39:41.447046 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.472361 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.472400 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.472411 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.472428 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.472439 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:41Z","lastTransitionTime":"2025-11-22T10:39:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.574265 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.574309 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.574317 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.574331 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.574343 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:41Z","lastTransitionTime":"2025-11-22T10:39:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.676623 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.676663 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.676674 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.676689 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.676700 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:41Z","lastTransitionTime":"2025-11-22T10:39:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.778636 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.778664 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.778673 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.778685 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.778693 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:41Z","lastTransitionTime":"2025-11-22T10:39:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.881327 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.881416 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.881438 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.881465 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.881486 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:41Z","lastTransitionTime":"2025-11-22T10:39:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.983981 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.984015 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.984026 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.984041 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:41 crc kubenswrapper[4938]: I1122 10:39:41.984052 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:41Z","lastTransitionTime":"2025-11-22T10:39:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.085555 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.085588 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.085599 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.085614 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.085624 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:42Z","lastTransitionTime":"2025-11-22T10:39:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.188514 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.188584 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.188786 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.188818 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.188837 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:42Z","lastTransitionTime":"2025-11-22T10:39:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.291411 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.291462 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.291474 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.291491 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.291504 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:42Z","lastTransitionTime":"2025-11-22T10:39:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.394133 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.394234 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.394258 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.394290 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.394315 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:42Z","lastTransitionTime":"2025-11-22T10:39:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.447153 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:42 crc kubenswrapper[4938]: E1122 10:39:42.447308 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.497238 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.497307 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.497321 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.497337 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.497349 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:42Z","lastTransitionTime":"2025-11-22T10:39:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.599954 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.600026 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.600045 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.600067 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.600083 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:42Z","lastTransitionTime":"2025-11-22T10:39:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.703188 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.703263 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.703281 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.703304 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.703321 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:42Z","lastTransitionTime":"2025-11-22T10:39:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.806114 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.806194 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.806212 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.806237 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.806259 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:42Z","lastTransitionTime":"2025-11-22T10:39:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.909370 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.909434 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.909452 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.909477 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:42 crc kubenswrapper[4938]: I1122 10:39:42.909496 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:42Z","lastTransitionTime":"2025-11-22T10:39:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.011821 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.011873 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.011890 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.011907 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.011936 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:43Z","lastTransitionTime":"2025-11-22T10:39:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.115179 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.115249 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.115269 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.115292 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.115335 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:43Z","lastTransitionTime":"2025-11-22T10:39:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.218218 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.218270 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.218287 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.218309 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.218327 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:43Z","lastTransitionTime":"2025-11-22T10:39:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.320589 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.320650 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.320665 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.320686 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.320701 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:43Z","lastTransitionTime":"2025-11-22T10:39:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.423322 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.423393 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.423454 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.423485 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.423507 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:43Z","lastTransitionTime":"2025-11-22T10:39:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.446954 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.447040 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.447058 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:43 crc kubenswrapper[4938]: E1122 10:39:43.447469 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:43 crc kubenswrapper[4938]: E1122 10:39:43.447564 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:43 crc kubenswrapper[4938]: E1122 10:39:43.447419 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.526240 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.526271 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.526282 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.526297 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.526307 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:43Z","lastTransitionTime":"2025-11-22T10:39:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.629045 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.629115 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.629139 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.629166 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.629214 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:43Z","lastTransitionTime":"2025-11-22T10:39:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.732297 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.732362 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.732372 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.732386 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.732395 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:43Z","lastTransitionTime":"2025-11-22T10:39:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.835249 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.835658 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.835852 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.836093 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.836302 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:43Z","lastTransitionTime":"2025-11-22T10:39:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.939115 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.939382 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.939474 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.939569 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:43 crc kubenswrapper[4938]: I1122 10:39:43.939662 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:43Z","lastTransitionTime":"2025-11-22T10:39:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.042139 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.042179 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.042191 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.042207 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.042217 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:44Z","lastTransitionTime":"2025-11-22T10:39:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.145070 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.145152 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.145178 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.145211 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.145236 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:44Z","lastTransitionTime":"2025-11-22T10:39:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.248030 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.248091 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.248110 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.248135 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.248159 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:44Z","lastTransitionTime":"2025-11-22T10:39:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.351405 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.351464 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.351480 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.351504 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.351524 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:44Z","lastTransitionTime":"2025-11-22T10:39:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.446827 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:44 crc kubenswrapper[4938]: E1122 10:39:44.446959 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.453012 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.453281 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.453349 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.453449 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.453539 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:44Z","lastTransitionTime":"2025-11-22T10:39:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.557715 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.557774 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.557794 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.557820 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.557838 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:44Z","lastTransitionTime":"2025-11-22T10:39:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.660688 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.660795 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.660815 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.660839 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.660856 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:44Z","lastTransitionTime":"2025-11-22T10:39:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.764092 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.764184 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.764205 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.764229 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.764248 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:44Z","lastTransitionTime":"2025-11-22T10:39:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.868942 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.868983 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.868991 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.869005 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.869014 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:44Z","lastTransitionTime":"2025-11-22T10:39:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.971717 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.971790 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.971809 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.971834 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:44 crc kubenswrapper[4938]: I1122 10:39:44.971852 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:44Z","lastTransitionTime":"2025-11-22T10:39:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.074709 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.074774 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.074797 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.074825 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.074846 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:45Z","lastTransitionTime":"2025-11-22T10:39:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.178004 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.178079 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.178106 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.178135 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.178158 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:45Z","lastTransitionTime":"2025-11-22T10:39:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.280514 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.280576 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.280593 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.280619 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.280636 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:45Z","lastTransitionTime":"2025-11-22T10:39:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.383288 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.383356 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.383372 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.383396 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.383411 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:45Z","lastTransitionTime":"2025-11-22T10:39:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.446760 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.446843 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.446776 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:45 crc kubenswrapper[4938]: E1122 10:39:45.447026 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:45 crc kubenswrapper[4938]: E1122 10:39:45.447099 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:45 crc kubenswrapper[4938]: E1122 10:39:45.447358 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.486116 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.486185 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.486202 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.486226 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.486246 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:45Z","lastTransitionTime":"2025-11-22T10:39:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.588436 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.588509 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.588531 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.588557 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.588575 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:45Z","lastTransitionTime":"2025-11-22T10:39:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.691538 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.691622 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.691642 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.691667 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.691684 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:45Z","lastTransitionTime":"2025-11-22T10:39:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.794302 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.794355 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.794375 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.794399 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.794416 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:45Z","lastTransitionTime":"2025-11-22T10:39:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.896839 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.896937 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.896954 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.896973 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:45 crc kubenswrapper[4938]: I1122 10:39:45.896986 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:45Z","lastTransitionTime":"2025-11-22T10:39:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:45.999968 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.000015 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.000033 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.000054 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.000070 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:46Z","lastTransitionTime":"2025-11-22T10:39:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.101570 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.101624 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.101634 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.101648 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.101656 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:46Z","lastTransitionTime":"2025-11-22T10:39:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.203700 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.203761 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.203770 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.203784 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.203794 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:46Z","lastTransitionTime":"2025-11-22T10:39:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.306964 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.307045 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.307081 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.307112 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.307133 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:46Z","lastTransitionTime":"2025-11-22T10:39:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.409837 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.409873 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.409881 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.409895 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.409905 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:46Z","lastTransitionTime":"2025-11-22T10:39:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.446827 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:46 crc kubenswrapper[4938]: E1122 10:39:46.446973 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.512570 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.512602 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.512612 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.512625 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.512637 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:46Z","lastTransitionTime":"2025-11-22T10:39:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.615732 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.615801 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.615817 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.615839 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.615852 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:46Z","lastTransitionTime":"2025-11-22T10:39:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.718503 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.718581 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.718605 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.718637 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.718661 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:46Z","lastTransitionTime":"2025-11-22T10:39:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.821159 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.821211 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.821228 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.821245 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.821257 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:46Z","lastTransitionTime":"2025-11-22T10:39:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.923864 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.923938 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.923952 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.923971 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:46 crc kubenswrapper[4938]: I1122 10:39:46.923982 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:46Z","lastTransitionTime":"2025-11-22T10:39:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.027782 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.027822 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.027830 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.027843 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.027852 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:47Z","lastTransitionTime":"2025-11-22T10:39:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.130957 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.131021 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.131039 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.131067 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.131087 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:47Z","lastTransitionTime":"2025-11-22T10:39:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.233524 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.233585 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.233602 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.233627 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.233647 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:47Z","lastTransitionTime":"2025-11-22T10:39:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.336474 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.336538 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.336559 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.336583 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.336600 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:47Z","lastTransitionTime":"2025-11-22T10:39:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.439573 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.439631 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.439647 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.439668 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.439683 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:47Z","lastTransitionTime":"2025-11-22T10:39:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.447206 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.447283 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.447468 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:47 crc kubenswrapper[4938]: E1122 10:39:47.447464 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:47 crc kubenswrapper[4938]: E1122 10:39:47.448042 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:47 crc kubenswrapper[4938]: E1122 10:39:47.448208 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.448854 4938 scope.go:117] "RemoveContainer" containerID="f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c" Nov 22 10:39:47 crc kubenswrapper[4938]: E1122 10:39:47.449189 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-8sphc_openshift-ovn-kubernetes(8b4b8200-248f-47ae-bed3-cbfd4598b99d)\"" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.542556 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.542609 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.542625 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.542648 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.542666 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:47Z","lastTransitionTime":"2025-11-22T10:39:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.645904 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.645997 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.646019 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.646046 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.646068 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:47Z","lastTransitionTime":"2025-11-22T10:39:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.749289 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.749400 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.749423 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.749456 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.749477 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:47Z","lastTransitionTime":"2025-11-22T10:39:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.851627 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.851686 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.851698 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.851718 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.851732 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:47Z","lastTransitionTime":"2025-11-22T10:39:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.954330 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.954381 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.954392 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.954409 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:47 crc kubenswrapper[4938]: I1122 10:39:47.954423 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:47Z","lastTransitionTime":"2025-11-22T10:39:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.056553 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.056601 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.056612 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.056628 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.056639 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:48Z","lastTransitionTime":"2025-11-22T10:39:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.159349 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.159435 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.159454 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.159503 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.159519 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:48Z","lastTransitionTime":"2025-11-22T10:39:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.262374 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.262422 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.262433 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.262451 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.262464 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:48Z","lastTransitionTime":"2025-11-22T10:39:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.367525 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.367576 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.367588 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.367605 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.367624 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:48Z","lastTransitionTime":"2025-11-22T10:39:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.446946 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:48 crc kubenswrapper[4938]: E1122 10:39:48.447064 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:48 crc kubenswrapper[4938]: E1122 10:39:48.468080 4938 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.470793 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=36.470778227 podStartE2EDuration="36.470778227s" podCreationTimestamp="2025-11-22 10:39:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:39:48.470571612 +0000 UTC m=+120.938409061" watchObservedRunningTime="2025-11-22 10:39:48.470778227 +0000 UTC m=+120.938615616" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.502117 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=17.502095817 podStartE2EDuration="17.502095817s" podCreationTimestamp="2025-11-22 10:39:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:39:48.500741201 +0000 UTC m=+120.968578650" watchObservedRunningTime="2025-11-22 10:39:48.502095817 +0000 UTC m=+120.969933216" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.572108 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-xdnvn" podStartSLOduration=97.572087891 podStartE2EDuration="1m37.572087891s" podCreationTimestamp="2025-11-22 10:38:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:39:48.541214323 +0000 UTC m=+121.009051772" watchObservedRunningTime="2025-11-22 10:39:48.572087891 +0000 UTC m=+121.039925300" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.600353 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podStartSLOduration=96.600329208 podStartE2EDuration="1m36.600329208s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:39:48.586891783 +0000 UTC m=+121.054729192" watchObservedRunningTime="2025-11-22 10:39:48.600329208 +0000 UTC m=+121.068166617" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.600577 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ngpcx" podStartSLOduration=96.600569805 podStartE2EDuration="1m36.600569805s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:39:48.598173171 +0000 UTC m=+121.066010590" watchObservedRunningTime="2025-11-22 10:39:48.600569805 +0000 UTC m=+121.068407214" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.612648 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=90.612627504 podStartE2EDuration="1m30.612627504s" podCreationTimestamp="2025-11-22 10:38:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:39:48.61132393 +0000 UTC m=+121.079161339" watchObservedRunningTime="2025-11-22 10:39:48.612627504 +0000 UTC m=+121.080464913" Nov 22 10:39:48 crc kubenswrapper[4938]: E1122 10:39:48.632475 4938 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.668471 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-6kr67" podStartSLOduration=96.668449293 podStartE2EDuration="1m36.668449293s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:39:48.66720599 +0000 UTC m=+121.135043389" watchObservedRunningTime="2025-11-22 10:39:48.668449293 +0000 UTC m=+121.136286702" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.685241 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-j67hq" podStartSLOduration=96.685218407 podStartE2EDuration="1m36.685218407s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:39:48.684069456 +0000 UTC m=+121.151906855" watchObservedRunningTime="2025-11-22 10:39:48.685218407 +0000 UTC m=+121.153055816" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.694238 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-8l8nr" podStartSLOduration=96.694217805 podStartE2EDuration="1m36.694217805s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:39:48.693252199 +0000 UTC m=+121.161089598" watchObservedRunningTime="2025-11-22 10:39:48.694217805 +0000 UTC m=+121.162055204" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.717634 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=69.717618845 podStartE2EDuration="1m9.717618845s" podCreationTimestamp="2025-11-22 10:38:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:39:48.716765582 +0000 UTC m=+121.184602991" watchObservedRunningTime="2025-11-22 10:39:48.717618845 +0000 UTC m=+121.185456244" Nov 22 10:39:48 crc kubenswrapper[4938]: I1122 10:39:48.733143 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=97.733125695 podStartE2EDuration="1m37.733125695s" podCreationTimestamp="2025-11-22 10:38:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:39:48.733073144 +0000 UTC m=+121.200910553" watchObservedRunningTime="2025-11-22 10:39:48.733125695 +0000 UTC m=+121.200963094" Nov 22 10:39:49 crc kubenswrapper[4938]: I1122 10:39:49.447227 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:49 crc kubenswrapper[4938]: I1122 10:39:49.447274 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:49 crc kubenswrapper[4938]: I1122 10:39:49.447323 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:49 crc kubenswrapper[4938]: E1122 10:39:49.447426 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:49 crc kubenswrapper[4938]: E1122 10:39:49.448226 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:49 crc kubenswrapper[4938]: E1122 10:39:49.448371 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.446905 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:50 crc kubenswrapper[4938]: E1122 10:39:50.447161 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.470718 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.470780 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.470803 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.470830 4938 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.470851 4938 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:50Z","lastTransitionTime":"2025-11-22T10:39:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.526204 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s"] Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.526733 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.530523 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.530560 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.530564 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.531329 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.567263 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/38c518cd-9ff6-4e14-ad91-5a205c855bfd-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.567391 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38c518cd-9ff6-4e14-ad91-5a205c855bfd-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.567476 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: 
\"kubernetes.io/host-path/38c518cd-9ff6-4e14-ad91-5a205c855bfd-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.567523 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/38c518cd-9ff6-4e14-ad91-5a205c855bfd-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.567559 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/38c518cd-9ff6-4e14-ad91-5a205c855bfd-service-ca\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.668335 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/38c518cd-9ff6-4e14-ad91-5a205c855bfd-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.668380 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/38c518cd-9ff6-4e14-ad91-5a205c855bfd-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.668417 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/38c518cd-9ff6-4e14-ad91-5a205c855bfd-service-ca\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.668451 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/38c518cd-9ff6-4e14-ad91-5a205c855bfd-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.668475 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38c518cd-9ff6-4e14-ad91-5a205c855bfd-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.668972 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/38c518cd-9ff6-4e14-ad91-5a205c855bfd-etc-ssl-certs\") pod 
\"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.669030 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/38c518cd-9ff6-4e14-ad91-5a205c855bfd-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.670478 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/38c518cd-9ff6-4e14-ad91-5a205c855bfd-service-ca\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.678483 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38c518cd-9ff6-4e14-ad91-5a205c855bfd-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.691537 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/38c518cd-9ff6-4e14-ad91-5a205c855bfd-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-5gc2s\" (UID: \"38c518cd-9ff6-4e14-ad91-5a205c855bfd\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: I1122 10:39:50.846716 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" Nov 22 10:39:50 crc kubenswrapper[4938]: W1122 10:39:50.867848 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38c518cd_9ff6_4e14_ad91_5a205c855bfd.slice/crio-32b8e5cd01265c1462359fd93391c791c5e33ba8263b6e2eaaabc6542042e45b WatchSource:0}: Error finding container 32b8e5cd01265c1462359fd93391c791c5e33ba8263b6e2eaaabc6542042e45b: Status 404 returned error can't find the container with id 32b8e5cd01265c1462359fd93391c791c5e33ba8263b6e2eaaabc6542042e45b Nov 22 10:39:51 crc kubenswrapper[4938]: I1122 10:39:51.096429 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" event={"ID":"38c518cd-9ff6-4e14-ad91-5a205c855bfd","Type":"ContainerStarted","Data":"93f069a77bed4a6277e7248b792b1a56ee9f1ade658e6368d08727bf0a0f3a07"} Nov 22 10:39:51 crc kubenswrapper[4938]: I1122 10:39:51.096508 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" event={"ID":"38c518cd-9ff6-4e14-ad91-5a205c855bfd","Type":"ContainerStarted","Data":"32b8e5cd01265c1462359fd93391c791c5e33ba8263b6e2eaaabc6542042e45b"} Nov 22 10:39:51 crc kubenswrapper[4938]: I1122 10:39:51.447126 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:51 crc kubenswrapper[4938]: I1122 10:39:51.447141 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:51 crc kubenswrapper[4938]: E1122 10:39:51.447264 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:51 crc kubenswrapper[4938]: E1122 10:39:51.447335 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:51 crc kubenswrapper[4938]: I1122 10:39:51.447374 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:51 crc kubenswrapper[4938]: E1122 10:39:51.447586 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:52 crc kubenswrapper[4938]: I1122 10:39:52.446765 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:52 crc kubenswrapper[4938]: E1122 10:39:52.446958 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:53 crc kubenswrapper[4938]: I1122 10:39:53.446900 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:53 crc kubenswrapper[4938]: I1122 10:39:53.446960 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:53 crc kubenswrapper[4938]: E1122 10:39:53.447074 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:53 crc kubenswrapper[4938]: I1122 10:39:53.446901 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:53 crc kubenswrapper[4938]: E1122 10:39:53.447182 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:53 crc kubenswrapper[4938]: E1122 10:39:53.447314 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:53 crc kubenswrapper[4938]: E1122 10:39:53.634146 4938 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 10:39:54 crc kubenswrapper[4938]: I1122 10:39:54.109772 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j67hq_671da3f6-347d-4f86-890d-155ef844b1f6/kube-multus/1.log" Nov 22 10:39:54 crc kubenswrapper[4938]: I1122 10:39:54.110284 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j67hq_671da3f6-347d-4f86-890d-155ef844b1f6/kube-multus/0.log" Nov 22 10:39:54 crc kubenswrapper[4938]: I1122 10:39:54.110329 4938 generic.go:334] "Generic (PLEG): container finished" podID="671da3f6-347d-4f86-890d-155ef844b1f6" containerID="60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a" exitCode=1 Nov 22 10:39:54 crc kubenswrapper[4938]: I1122 10:39:54.110358 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j67hq" event={"ID":"671da3f6-347d-4f86-890d-155ef844b1f6","Type":"ContainerDied","Data":"60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a"} Nov 22 10:39:54 crc kubenswrapper[4938]: I1122 10:39:54.110392 4938 scope.go:117] "RemoveContainer" containerID="43319d4664a0d2050962fa912c4b7ea368c0f21cc1fce3e363928f9aaba5b0cb" Nov 22 10:39:54 crc kubenswrapper[4938]: I1122 10:39:54.110821 4938 scope.go:117] "RemoveContainer" containerID="60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a" Nov 22 10:39:54 crc kubenswrapper[4938]: E1122 10:39:54.111007 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-j67hq_openshift-multus(671da3f6-347d-4f86-890d-155ef844b1f6)\"" pod="openshift-multus/multus-j67hq" podUID="671da3f6-347d-4f86-890d-155ef844b1f6" Nov 22 10:39:54 crc kubenswrapper[4938]: I1122 10:39:54.139766 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5gc2s" 
podStartSLOduration=102.139741439 podStartE2EDuration="1m42.139741439s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:39:51.114547473 +0000 UTC m=+123.582384962" watchObservedRunningTime="2025-11-22 10:39:54.139741439 +0000 UTC m=+126.607578878" Nov 22 10:39:54 crc kubenswrapper[4938]: I1122 10:39:54.446357 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:54 crc kubenswrapper[4938]: E1122 10:39:54.446510 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:55 crc kubenswrapper[4938]: I1122 10:39:55.116565 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j67hq_671da3f6-347d-4f86-890d-155ef844b1f6/kube-multus/1.log" Nov 22 10:39:55 crc kubenswrapper[4938]: I1122 10:39:55.447091 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:55 crc kubenswrapper[4938]: I1122 10:39:55.447155 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:55 crc kubenswrapper[4938]: I1122 10:39:55.447109 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:55 crc kubenswrapper[4938]: E1122 10:39:55.447266 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:55 crc kubenswrapper[4938]: E1122 10:39:55.447357 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:55 crc kubenswrapper[4938]: E1122 10:39:55.447457 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:56 crc kubenswrapper[4938]: I1122 10:39:56.446736 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:56 crc kubenswrapper[4938]: E1122 10:39:56.446959 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:57 crc kubenswrapper[4938]: I1122 10:39:57.447334 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:57 crc kubenswrapper[4938]: I1122 10:39:57.447513 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:57 crc kubenswrapper[4938]: E1122 10:39:57.447651 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:57 crc kubenswrapper[4938]: I1122 10:39:57.447692 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:57 crc kubenswrapper[4938]: E1122 10:39:57.448003 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:39:57 crc kubenswrapper[4938]: E1122 10:39:57.448019 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:58 crc kubenswrapper[4938]: I1122 10:39:58.447652 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:39:58 crc kubenswrapper[4938]: E1122 10:39:58.449886 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:39:58 crc kubenswrapper[4938]: E1122 10:39:58.635546 4938 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" Nov 22 10:39:59 crc kubenswrapper[4938]: I1122 10:39:59.447053 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:39:59 crc kubenswrapper[4938]: I1122 10:39:59.447167 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:39:59 crc kubenswrapper[4938]: E1122 10:39:59.447195 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:39:59 crc kubenswrapper[4938]: I1122 10:39:59.447358 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:39:59 crc kubenswrapper[4938]: E1122 10:39:59.447523 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:39:59 crc kubenswrapper[4938]: E1122 10:39:59.447957 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:40:00 crc kubenswrapper[4938]: I1122 10:40:00.447145 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:00 crc kubenswrapper[4938]: E1122 10:40:00.447318 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:01 crc kubenswrapper[4938]: I1122 10:40:01.447233 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:01 crc kubenswrapper[4938]: I1122 10:40:01.447258 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:01 crc kubenswrapper[4938]: E1122 10:40:01.447770 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:01 crc kubenswrapper[4938]: I1122 10:40:01.447280 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:40:01 crc kubenswrapper[4938]: E1122 10:40:01.448164 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:40:01 crc kubenswrapper[4938]: E1122 10:40:01.448040 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:02 crc kubenswrapper[4938]: I1122 10:40:02.447347 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:02 crc kubenswrapper[4938]: E1122 10:40:02.447483 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:02 crc kubenswrapper[4938]: I1122 10:40:02.448322 4938 scope.go:117] "RemoveContainer" containerID="f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c" Nov 22 10:40:03 crc kubenswrapper[4938]: I1122 10:40:03.143639 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/3.log" Nov 22 10:40:03 crc kubenswrapper[4938]: I1122 10:40:03.146310 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerStarted","Data":"05e325b4fb3b81099ea954a2f435e8577e7a39a759a6db1aef36d5749167aaee"} Nov 22 10:40:03 crc kubenswrapper[4938]: I1122 10:40:03.146638 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:40:03 crc kubenswrapper[4938]: I1122 10:40:03.169849 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podStartSLOduration=111.169832128 podStartE2EDuration="1m51.169832128s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:03.169644153 +0000 UTC m=+135.637481562" watchObservedRunningTime="2025-11-22 10:40:03.169832128 +0000 UTC m=+135.637669527" Nov 22 10:40:03 crc kubenswrapper[4938]: I1122 10:40:03.336121 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-s7w5f"] Nov 22 10:40:03 crc kubenswrapper[4938]: I1122 10:40:03.336264 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:40:03 crc kubenswrapper[4938]: E1122 10:40:03.336380 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:40:03 crc kubenswrapper[4938]: I1122 10:40:03.446657 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:03 crc kubenswrapper[4938]: I1122 10:40:03.446757 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:03 crc kubenswrapper[4938]: E1122 10:40:03.446795 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:03 crc kubenswrapper[4938]: E1122 10:40:03.446899 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:03 crc kubenswrapper[4938]: E1122 10:40:03.637557 4938 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 10:40:04 crc kubenswrapper[4938]: I1122 10:40:04.447179 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:04 crc kubenswrapper[4938]: I1122 10:40:04.447231 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:40:04 crc kubenswrapper[4938]: E1122 10:40:04.447401 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:04 crc kubenswrapper[4938]: E1122 10:40:04.447572 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:40:05 crc kubenswrapper[4938]: I1122 10:40:05.446841 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:05 crc kubenswrapper[4938]: I1122 10:40:05.446945 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:05 crc kubenswrapper[4938]: E1122 10:40:05.447005 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:05 crc kubenswrapper[4938]: E1122 10:40:05.447089 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:06 crc kubenswrapper[4938]: I1122 10:40:06.446588 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:06 crc kubenswrapper[4938]: E1122 10:40:06.446975 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:06 crc kubenswrapper[4938]: I1122 10:40:06.447138 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:40:06 crc kubenswrapper[4938]: E1122 10:40:06.447279 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:40:07 crc kubenswrapper[4938]: I1122 10:40:07.446706 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:07 crc kubenswrapper[4938]: I1122 10:40:07.446714 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:07 crc kubenswrapper[4938]: I1122 10:40:07.447181 4938 scope.go:117] "RemoveContainer" containerID="60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a" Nov 22 10:40:07 crc kubenswrapper[4938]: E1122 10:40:07.447198 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:07 crc kubenswrapper[4938]: E1122 10:40:07.447326 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:08 crc kubenswrapper[4938]: I1122 10:40:08.162230 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j67hq_671da3f6-347d-4f86-890d-155ef844b1f6/kube-multus/1.log" Nov 22 10:40:08 crc kubenswrapper[4938]: I1122 10:40:08.162315 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j67hq" event={"ID":"671da3f6-347d-4f86-890d-155ef844b1f6","Type":"ContainerStarted","Data":"11d70986b71a5b1cbaaba2bd80285a38020e3fdfd8834cce96c5292beb37815e"} Nov 22 10:40:08 crc kubenswrapper[4938]: I1122 10:40:08.446469 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:08 crc kubenswrapper[4938]: I1122 10:40:08.446559 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:40:08 crc kubenswrapper[4938]: E1122 10:40:08.447718 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:08 crc kubenswrapper[4938]: E1122 10:40:08.447893 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:40:08 crc kubenswrapper[4938]: E1122 10:40:08.638152 4938 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 10:40:09 crc kubenswrapper[4938]: I1122 10:40:09.446615 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:09 crc kubenswrapper[4938]: I1122 10:40:09.446719 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:09 crc kubenswrapper[4938]: E1122 10:40:09.446798 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:09 crc kubenswrapper[4938]: E1122 10:40:09.446876 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:10 crc kubenswrapper[4938]: I1122 10:40:10.447512 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:40:10 crc kubenswrapper[4938]: I1122 10:40:10.447565 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:10 crc kubenswrapper[4938]: E1122 10:40:10.447649 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:40:10 crc kubenswrapper[4938]: E1122 10:40:10.447694 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:11 crc kubenswrapper[4938]: I1122 10:40:11.446679 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:11 crc kubenswrapper[4938]: I1122 10:40:11.446759 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:11 crc kubenswrapper[4938]: E1122 10:40:11.447249 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:11 crc kubenswrapper[4938]: E1122 10:40:11.447369 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:12 crc kubenswrapper[4938]: I1122 10:40:12.446726 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:40:12 crc kubenswrapper[4938]: I1122 10:40:12.446765 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:12 crc kubenswrapper[4938]: E1122 10:40:12.447684 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s7w5f" podUID="7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c" Nov 22 10:40:12 crc kubenswrapper[4938]: E1122 10:40:12.448170 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:13 crc kubenswrapper[4938]: I1122 10:40:13.447094 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:13 crc kubenswrapper[4938]: E1122 10:40:13.448186 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:13 crc kubenswrapper[4938]: I1122 10:40:13.447107 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:13 crc kubenswrapper[4938]: E1122 10:40:13.448629 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:14 crc kubenswrapper[4938]: I1122 10:40:14.446614 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:14 crc kubenswrapper[4938]: I1122 10:40:14.446639 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f" Nov 22 10:40:14 crc kubenswrapper[4938]: I1122 10:40:14.450307 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 22 10:40:14 crc kubenswrapper[4938]: I1122 10:40:14.450574 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 22 10:40:14 crc kubenswrapper[4938]: I1122 10:40:14.450582 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 22 10:40:14 crc kubenswrapper[4938]: I1122 10:40:14.450678 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 22 10:40:15 crc kubenswrapper[4938]: I1122 10:40:15.447130 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:15 crc kubenswrapper[4938]: I1122 10:40:15.447156 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:15 crc kubenswrapper[4938]: I1122 10:40:15.449452 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 22 10:40:15 crc kubenswrapper[4938]: I1122 10:40:15.449826 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 22 10:40:19 crc kubenswrapper[4938]: I1122 10:40:19.349284 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:19 crc kubenswrapper[4938]: I1122 10:40:19.349439 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:19 crc kubenswrapper[4938]: I1122 10:40:19.349477 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:19 crc kubenswrapper[4938]: E1122 10:40:19.350261 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:21.35022949 +0000 UTC m=+273.818066899 (durationBeforeRetry 2m2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:19 crc kubenswrapper[4938]: I1122 10:40:19.350567 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:19 crc kubenswrapper[4938]: I1122 10:40:19.358787 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:19 crc kubenswrapper[4938]: I1122 10:40:19.376871 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:19 crc kubenswrapper[4938]: I1122 10:40:19.551450 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:19 crc kubenswrapper[4938]: I1122 10:40:19.551730 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:19 crc kubenswrapper[4938]: I1122 10:40:19.558745 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:19 crc kubenswrapper[4938]: I1122 10:40:19.560475 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:19 crc kubenswrapper[4938]: I1122 10:40:19.562974 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:19 crc kubenswrapper[4938]: I1122 10:40:19.666638 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:19 crc kubenswrapper[4938]: W1122 10:40:19.812608 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-60594549e6f6027f7b3d84114dee38b80f6e3424af63f16fba39b3ee40b7d0d6 WatchSource:0}: Error finding container 60594549e6f6027f7b3d84114dee38b80f6e3424af63f16fba39b3ee40b7d0d6: Status 404 returned error can't find the container with id 60594549e6f6027f7b3d84114dee38b80f6e3424af63f16fba39b3ee40b7d0d6 Nov 22 10:40:19 crc kubenswrapper[4938]: W1122 10:40:19.945278 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-01ad141eb5b3d4af05eef07612f1b6c998571b29dffca39a23840824fb801085 WatchSource:0}: Error finding container 01ad141eb5b3d4af05eef07612f1b6c998571b29dffca39a23840824fb801085: Status 404 returned error can't find the container with id 01ad141eb5b3d4af05eef07612f1b6c998571b29dffca39a23840824fb801085 Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.211296 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"a95425d99dc92da2a7ab530019d10b23323af16753384e48b7f80418748fda97"} Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.211669 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"60594549e6f6027f7b3d84114dee38b80f6e3424af63f16fba39b3ee40b7d0d6"} Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.214678 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"a58dc2feb9a8fb0d3c9c0159469377af600382a11e907c609732cd15cfe3f139"} Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.214705 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"01ad141eb5b3d4af05eef07612f1b6c998571b29dffca39a23840824fb801085"} Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.214866 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.216015 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"c157efc17b56d79f932a07d47e878e923acd390070bcab5798fcdf3cfaa74af5"} Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.216041 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"8b0e571e891243c43b9224cd31a9ed6d041ea414ab002ee1781b767f41792fea"} Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.769212 4938 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeReady" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.807102 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-fk4l7"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.807832 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.809828 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-88scl"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.810341 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.811447 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-rqzh7"] Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.812853 4938 reflector.go:561] object-"openshift-apiserver"/"etcd-client": failed to list *v1.Secret: secrets "etcd-client" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.812957 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"etcd-client\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"etcd-client\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.812877 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.813905 4938 reflector.go:561] object-"openshift-apiserver"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.813999 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.814069 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.814986 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.815215 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.815841 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-xb74c"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.817708 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.818060 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.820559 4938 reflector.go:561] object-"openshift-apiserver"/"config": failed to list *v1.ConfigMap: configmaps "config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.820642 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.820689 4938 reflector.go:561] object-"openshift-apiserver"/"trusted-ca-bundle": failed to list *v1.ConfigMap: configmaps "trusted-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.820702 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"trusted-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.820760 4938 reflector.go:561] object-"openshift-apiserver"/"etcd-serving-ca": failed to list *v1.ConfigMap: configmaps "etcd-serving-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.820771 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"etcd-serving-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"etcd-serving-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.820937 4938 reflector.go:561] object-"openshift-controller-manager"/"config": failed to list *v1.ConfigMap: configmaps "config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found 
between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.820954 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.820984 4938 reflector.go:561] object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq": failed to list *v1.Secret: secrets "oauth-apiserver-sa-dockercfg-6r2bq" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.820995 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"oauth-apiserver-sa-dockercfg-6r2bq\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"oauth-apiserver-sa-dockercfg-6r2bq\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.821024 4938 reflector.go:561] object-"openshift-oauth-apiserver"/"trusted-ca-bundle": failed to list *v1.ConfigMap: configmaps "trusted-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.821033 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"trusted-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.821057 4938 reflector.go:561] object-"openshift-oauth-apiserver"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.821068 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.821150 4938 reflector.go:561] object-"openshift-oauth-apiserver"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object 
Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.821164 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.821208 4938 reflector.go:561] object-"openshift-apiserver"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.821218 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.821246 4938 reflector.go:561] object-"openshift-controller-manager"/"openshift-global-ca": failed to list *v1.ConfigMap: configmaps "openshift-global-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.821256 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-global-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-global-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.821405 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.821503 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.821619 4938 reflector.go:561] object-"openshift-oauth-apiserver"/"encryption-config-1": failed to list *v1.Secret: secrets "encryption-config-1" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.821633 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"encryption-config-1\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"encryption-config-1\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.821707 4938 reflector.go:561] object-"openshift-apiserver"/"image-import-ca": failed to list *v1.ConfigMap: configmaps "image-import-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.821718 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"image-import-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"image-import-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.821735 4938 reflector.go:561] object-"openshift-controller-manager"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.821744 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.821773 4938 reflector.go:561] object-"openshift-controller-manager"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.821783 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and 
this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.826372 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-wm7ff"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.827065 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833494 4938 reflector.go:561] object-"openshift-machine-api"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833511 4938 reflector.go:561] object-"openshift-machine-api"/"machine-api-operator-tls": failed to list *v1.Secret: secrets "machine-api-operator-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.833538 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833556 4938 reflector.go:561] object-"openshift-oauth-apiserver"/"audit-1": failed to list *v1.ConfigMap: configmaps "audit-1" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833579 4938 reflector.go:561] object-"openshift-machine-api"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833593 4938 reflector.go:561] object-"openshift-oauth-apiserver"/"etcd-serving-ca": failed to list *v1.ConfigMap: configmaps "etcd-serving-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.833610 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"audit-1\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"audit-1\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.833628 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"etcd-serving-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps 
\"etcd-serving-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833561 4938 reflector.go:561] object-"openshift-machine-api"/"kube-rbac-proxy": failed to list *v1.ConfigMap: configmaps "kube-rbac-proxy" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.833571 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"machine-api-operator-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-api-operator-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.833628 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.833653 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"kube-rbac-proxy\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-rbac-proxy\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833579 4938 reflector.go:561] object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7": failed to list *v1.Secret: secrets "machine-api-operator-dockercfg-mfbb7" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833664 4938 reflector.go:561] object-"openshift-apiserver"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.833681 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"machine-api-operator-dockercfg-mfbb7\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-api-operator-dockercfg-mfbb7\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833693 4938 reflector.go:561] object-"openshift-apiserver"/"audit-1": failed to list *v1.ConfigMap: configmaps "audit-1" is forbidden: User 
"system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.833699 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.833708 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"audit-1\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"audit-1\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833560 4938 reflector.go:561] object-"openshift-machine-api"/"machine-api-operator-images": failed to list *v1.ConfigMap: configmaps "machine-api-operator-images" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833753 4938 reflector.go:561] object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff": failed to list *v1.Secret: secrets "openshift-apiserver-sa-dockercfg-djjff" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833591 4938 reflector.go:561] object-"openshift-oauth-apiserver"/"etcd-client": failed to list *v1.Secret: secrets "etcd-client" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.833556 4938 reflector.go:561] object-"openshift-oauth-apiserver"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-oauth-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.835755 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.835211 4938 reflector.go:561] object-"openshift-apiserver"/"encryption-config-1": failed to list *v1.Secret: secrets "encryption-config-1" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc 
kubenswrapper[4938]: E1122 10:40:20.835850 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"encryption-config-1\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"encryption-config-1\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.833782 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-oauth-apiserver\"/\"etcd-client\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"etcd-client\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-oauth-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.833755 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"machine-api-operator-images\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"machine-api-operator-images\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.836486 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver\"/\"openshift-apiserver-sa-dockercfg-djjff\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-apiserver-sa-dockercfg-djjff\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.837425 4938 reflector.go:561] object-"openshift-cluster-machine-approver"/"machine-approver-tls": failed to list *v1.Secret: secrets "machine-approver-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.837494 4938 reflector.go:561] object-"openshift-cluster-machine-approver"/"machine-approver-config": failed to list *v1.ConfigMap: configmaps "machine-approver-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.837995 4938 reflector.go:561] object-"openshift-authentication-operator"/"authentication-operator-config": failed to list *v1.ConfigMap: configmaps "authentication-operator-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.838017 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"authentication-operator-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"authentication-operator-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API 
group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.838988 4938 reflector.go:561] object-"openshift-authentication-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.839013 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.838988 4938 reflector.go:561] object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj": failed to list *v1.Secret: secrets "authentication-operator-dockercfg-mz9bj" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.839340 4938 reflector.go:561] object-"openshift-controller-manager"/"client-ca": failed to list *v1.ConfigMap: configmaps "client-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.839349 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"authentication-operator-dockercfg-mz9bj\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"authentication-operator-dockercfg-mz9bj\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.839397 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"client-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"client-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.840427 4938 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv": failed to list *v1.Secret: secrets "openshift-apiserver-operator-dockercfg-xtcjv" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.840492 4938 reflector.go:158] "Unhandled Error" 
err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-dockercfg-xtcjv\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-apiserver-operator-dockercfg-xtcjv\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.841291 4938 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert": failed to list *v1.Secret: secrets "openshift-apiserver-operator-serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.841319 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-apiserver-operator-serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.837480 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"machine-approver-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-approver-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.843483 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"machine-approver-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"machine-approver-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.843610 4938 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.843648 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.843717 4938 reflector.go:561] object-"openshift-cluster-machine-approver"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: 
User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.843733 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.844183 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.844360 4938 reflector.go:561] object-"openshift-apiserver-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.844401 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.844424 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.844703 4938 reflector.go:561] object-"openshift-authentication-operator"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.844730 4938 reflector.go:561] object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4": failed to list *v1.Secret: secrets "machine-approver-sa-dockercfg-nl2j4" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.844725 4938 reflector.go:561] object-"openshift-controller-manager"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.844830 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.856422 4938 reflector.go:561] object-"openshift-cluster-machine-approver"/"kube-rbac-proxy": failed to list 
*v1.ConfigMap: configmaps "kube-rbac-proxy" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.856470 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"kube-rbac-proxy\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-rbac-proxy\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.856509 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.856558 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.856884 4938 reflector.go:561] object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c": failed to list *v1.Secret: secrets "openshift-controller-manager-sa-dockercfg-msq4c" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.856927 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-controller-manager-sa-dockercfg-msq4c\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-controller-manager-sa-dockercfg-msq4c\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.844677 4938 reflector.go:561] object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-cluster-machine-approver": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.857695 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.844775 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc 
kubenswrapper[4938]: I1122 10:40:20.858035 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.858130 4938 reflector.go:561] object-"openshift-authentication-operator"/"trusted-ca-bundle": failed to list *v1.ConfigMap: configmaps "trusted-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.858148 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"trusted-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.845065 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.858324 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.852172 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-cluster-machine-approver\"/\"machine-approver-sa-dockercfg-nl2j4\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-approver-sa-dockercfg-nl2j4\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-cluster-machine-approver\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.858383 4938 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config": failed to list *v1.ConfigMap: configmaps "openshift-apiserver-operator-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.858396 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-apiserver-operator-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.858414 4938 reflector.go:561] object-"openshift-authentication-operator"/"service-ca-bundle": failed to list *v1.ConfigMap: configmaps "service-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the 
namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.858423 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"service-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"service-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: W1122 10:40:20.857660 4938 reflector.go:561] object-"openshift-authentication-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-authentication-operator": no relationship found between node 'crc' and this object Nov 22 10:40:20 crc kubenswrapper[4938]: E1122 10:40:20.858826 4938 reflector.go:158] "Unhandled Error" err="object-\"openshift-authentication-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-authentication-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.861384 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-k5zr9"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.861478 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.861957 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zg2km"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862382 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862552 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7vfb\" (UniqueName: \"kubernetes.io/projected/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-kube-api-access-d7vfb\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862594 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-audit-policies\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862621 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0ba3027-3c7b-479f-be9f-ac471151ec8a-serving-cert\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862644 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnxfq\" (UniqueName: \"kubernetes.io/projected/44da01b3-b33a-402b-9bc1-ceea816d801b-kube-api-access-pnxfq\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862665 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-client\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862687 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-encryption-config\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862711 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-trusted-ca-bundle\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862735 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-serving-ca\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 
10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862755 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2de462e6-eb84-4e7e-904e-5d303e8ffc17-audit-dir\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862776 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64767c08-8bde-4744-b0dd-e1629fd6e349-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862800 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdwwt\" (UniqueName: \"kubernetes.io/projected/2de462e6-eb84-4e7e-904e-5d303e8ffc17-kube-api-access-pdwwt\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862822 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-machine-approver-tls\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862843 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-config\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862866 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-config\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862883 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-client-ca\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862897 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-audit\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862911 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-h8t6p\" (UniqueName: \"kubernetes.io/projected/d90bb3f2-72ce-41fa-b865-8892a4b70c06-kube-api-access-h8t6p\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.862951 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d90bb3f2-72ce-41fa-b865-8892a4b70c06-serving-cert\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863280 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxgc8\" (UniqueName: \"kubernetes.io/projected/a0ba3027-3c7b-479f-be9f-ac471151ec8a-kube-api-access-lxgc8\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863315 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-k5zr9" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863698 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-config\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863735 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-config\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863752 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-serving-cert\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863766 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-serving-cert\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863780 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/64767c08-8bde-4744-b0dd-e1629fd6e349-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 
10:40:20.863798 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/9a34102a-5b15-4d64-9ca6-d565af874df5-node-pullsecrets\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863817 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-images\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863831 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863844 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-auth-proxy-config\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863860 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzsnd\" (UniqueName: \"kubernetes.io/projected/9a34102a-5b15-4d64-9ca6-d565af874df5-kube-api-access-tzsnd\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863875 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-encryption-config\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863890 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-service-ca-bundle\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863937 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/44da01b3-b33a-402b-9bc1-ceea816d801b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.863958 4938 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwvk7\" (UniqueName: \"kubernetes.io/projected/64767c08-8bde-4744-b0dd-e1629fd6e349-kube-api-access-bwvk7\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.864022 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-client\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.864053 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9a34102a-5b15-4d64-9ca6-d565af874df5-audit-dir\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.864075 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.864103 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.864129 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-config\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.864166 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-image-import-ca\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.864189 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.865112 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 22 
10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.865335 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.865750 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.867222 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6whgh"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.867702 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.868024 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.868246 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.868851 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.869500 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.870194 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zr5wf"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.870756 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.871798 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.872131 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.874262 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.875487 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.876065 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.876297 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.876479 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.876637 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.876707 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-z795b"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.877061 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.877185 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.877234 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.877414 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.877566 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.881600 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.881818 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.882100 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.882535 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-rqzh7"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.884834 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.885382 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-rl6xd"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.885693 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.885958 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.890260 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.890332 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.890491 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.890542 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.891073 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.891438 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.891667 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.891752 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.891862 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892064 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892101 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892227 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892251 4938 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892347 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892461 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892467 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892596 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892670 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892680 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892680 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892878 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.892989 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.910010 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.918661 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-k9fxc"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.920083 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.920752 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-29bgm"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.921895 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-29bgm" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.923096 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.923958 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.931186 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.931419 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.931840 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.933196 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.934224 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.934494 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.940031 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.940235 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.940350 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.943790 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.944400 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.945248 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.945649 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.952131 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.953718 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-mgpzv"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.954536 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.958227 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.958768 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.958888 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.959414 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.960392 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-fk4l7"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.963591 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vst8c"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.965097 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.965449 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.965831 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-images\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.965858 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.965876 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-auth-proxy-config\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966413 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966587 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-oauth-config\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966614 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c2334157-de9a-47fc-8dd1-9388ba35334a-serving-cert\") pod \"openshift-config-operator-7777fb866f-zg2km\" (UID: \"c2334157-de9a-47fc-8dd1-9388ba35334a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966636 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzsnd\" (UniqueName: \"kubernetes.io/projected/9a34102a-5b15-4d64-9ca6-d565af874df5-kube-api-access-tzsnd\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966651 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-encryption-config\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966671 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-service-ca-bundle\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966690 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4svm\" (UniqueName: \"kubernetes.io/projected/5364235f-88dd-4a0b-a055-5c075fbdff13-kube-api-access-m4svm\") pod \"console-operator-58897d9998-k9fxc\" (UID: \"5364235f-88dd-4a0b-a055-5c075fbdff13\") " pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966706 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/44da01b3-b33a-402b-9bc1-ceea816d801b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966722 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwvk7\" (UniqueName: \"kubernetes.io/projected/64767c08-8bde-4744-b0dd-e1629fd6e349-kube-api-access-bwvk7\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " 
pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966738 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9a34102a-5b15-4d64-9ca6-d565af874df5-audit-dir\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966752 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966770 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-client\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966786 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966802 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbnfs\" (UniqueName: \"kubernetes.io/projected/62a22e0b-f684-4d68-90d4-667e660287cb-kube-api-access-gbnfs\") pod \"cluster-samples-operator-665b6dd947-q6v7w\" (UID: \"62a22e0b-f684-4d68-90d4-667e660287cb\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.966819 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-config\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967234 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9a34102a-5b15-4d64-9ca6-d565af874df5-audit-dir\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967423 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5364235f-88dd-4a0b-a055-5c075fbdff13-config\") pod \"console-operator-58897d9998-k9fxc\" (UID: \"5364235f-88dd-4a0b-a055-5c075fbdff13\") " pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967534 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" 
(UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-image-import-ca\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967558 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967590 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4h9bp\" (UniqueName: \"kubernetes.io/projected/9a78982d-f026-44c1-a2d2-ec9caa99331c-kube-api-access-4h9bp\") pod \"downloads-7954f5f757-k5zr9\" (UID: \"9a78982d-f026-44c1-a2d2-ec9caa99331c\") " pod="openshift-console/downloads-7954f5f757-k5zr9" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967609 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6zfn\" (UniqueName: \"kubernetes.io/projected/c2334157-de9a-47fc-8dd1-9388ba35334a-kube-api-access-n6zfn\") pod \"openshift-config-operator-7777fb866f-zg2km\" (UID: \"c2334157-de9a-47fc-8dd1-9388ba35334a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967633 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-audit-policies\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967650 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0ba3027-3c7b-479f-be9f-ac471151ec8a-serving-cert\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967667 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7vfb\" (UniqueName: \"kubernetes.io/projected/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-kube-api-access-d7vfb\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967686 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnxfq\" (UniqueName: \"kubernetes.io/projected/44da01b3-b33a-402b-9bc1-ceea816d801b-kube-api-access-pnxfq\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967705 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-client\") pod \"apiserver-7bbb656c7d-999ph\" (UID: 
\"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967730 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5364235f-88dd-4a0b-a055-5c075fbdff13-trusted-ca\") pod \"console-operator-58897d9998-k9fxc\" (UID: \"5364235f-88dd-4a0b-a055-5c075fbdff13\") " pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967784 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-encryption-config\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967801 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5364235f-88dd-4a0b-a055-5c075fbdff13-serving-cert\") pod \"console-operator-58897d9998-k9fxc\" (UID: \"5364235f-88dd-4a0b-a055-5c075fbdff13\") " pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967816 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-serving-cert\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967832 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-oauth-serving-cert\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.967846 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/c2334157-de9a-47fc-8dd1-9388ba35334a-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zg2km\" (UID: \"c2334157-de9a-47fc-8dd1-9388ba35334a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968006 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-trusted-ca-bundle\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968345 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/62a22e0b-f684-4d68-90d4-667e660287cb-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-q6v7w\" (UID: \"62a22e0b-f684-4d68-90d4-667e660287cb\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w" Nov 22 10:40:20 crc 
kubenswrapper[4938]: I1122 10:40:20.968376 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2de462e6-eb84-4e7e-904e-5d303e8ffc17-audit-dir\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968398 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64767c08-8bde-4744-b0dd-e1629fd6e349-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968422 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-serving-ca\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968443 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdwwt\" (UniqueName: \"kubernetes.io/projected/2de462e6-eb84-4e7e-904e-5d303e8ffc17-kube-api-access-pdwwt\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968461 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-machine-approver-tls\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968483 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-config\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968506 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-config\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968529 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-config\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968635 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-client-ca\") pod \"controller-manager-879f6c89f-88scl\" (UID: 
\"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968643 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2de462e6-eb84-4e7e-904e-5d303e8ffc17-audit-dir\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968663 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-service-ca\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968699 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-audit\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968725 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8t6p\" (UniqueName: \"kubernetes.io/projected/d90bb3f2-72ce-41fa-b865-8892a4b70c06-kube-api-access-h8t6p\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968752 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d90bb3f2-72ce-41fa-b865-8892a4b70c06-serving-cert\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968777 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxgc8\" (UniqueName: \"kubernetes.io/projected/a0ba3027-3c7b-479f-be9f-ac471151ec8a-kube-api-access-lxgc8\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968811 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-config\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968842 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqxmh\" (UniqueName: \"kubernetes.io/projected/4461eec4-354b-417f-b8ae-24e3deed3a5a-kube-api-access-pqxmh\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968870 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-config\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.968898 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-serving-cert\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.969133 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-serving-cert\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.969161 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/64767c08-8bde-4744-b0dd-e1629fd6e349-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.969204 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/9a34102a-5b15-4d64-9ca6-d565af874df5-node-pullsecrets\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.969220 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.969234 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-trusted-ca-bundle\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.969244 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.969338 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/9a34102a-5b15-4d64-9ca6-d565af874df5-node-pullsecrets\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.977231 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.977741 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.978834 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.979767 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.983509 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-p4glz"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.983646 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.986450 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-jg5zd"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.986757 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-mh9ds"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.987183 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-88scl"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.987203 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.987535 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.988051 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.988070 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-xb74c"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.988081 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-wm7ff"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.988090 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.988099 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.988166 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.989184 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zg2km"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.989622 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p4glz" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.990207 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.992107 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.994352 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.994384 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zr5wf"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.995357 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.996366 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-mh9ds" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.996373 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-sqh6q"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.997255 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-sqh6q" Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.997662 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-dd667"] Nov 22 10:40:20 crc kubenswrapper[4938]: I1122 10:40:20.997791 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.020283 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-dd667" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.021373 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vst8c"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.021421 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.021436 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-k9fxc"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.021448 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-z795b"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.022507 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.024132 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.027514 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.029505 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.029814 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-29bgm"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.037561 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-k5zr9"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.039064 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.042930 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.044800 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.045795 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.049279 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.049406 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-p4glz"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.050882 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6whgh"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.052170 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.053187 
4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.054447 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.055481 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-dd667"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.056615 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.058948 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.060473 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.062794 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-mh9ds"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.064904 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.066497 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-jg5zd"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.068082 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-rl6xd"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069576 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqxmh\" (UniqueName: \"kubernetes.io/projected/4461eec4-354b-417f-b8ae-24e3deed3a5a-kube-api-access-pqxmh\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069621 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-trusted-ca-bundle\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069665 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-oauth-config\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069681 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c2334157-de9a-47fc-8dd1-9388ba35334a-serving-cert\") pod \"openshift-config-operator-7777fb866f-zg2km\" (UID: \"c2334157-de9a-47fc-8dd1-9388ba35334a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069696 4938 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-m4svm\" (UniqueName: \"kubernetes.io/projected/5364235f-88dd-4a0b-a055-5c075fbdff13-kube-api-access-m4svm\") pod \"console-operator-58897d9998-k9fxc\" (UID: \"5364235f-88dd-4a0b-a055-5c075fbdff13\") " pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069739 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbnfs\" (UniqueName: \"kubernetes.io/projected/62a22e0b-f684-4d68-90d4-667e660287cb-kube-api-access-gbnfs\") pod \"cluster-samples-operator-665b6dd947-q6v7w\" (UID: \"62a22e0b-f684-4d68-90d4-667e660287cb\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069759 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5364235f-88dd-4a0b-a055-5c075fbdff13-config\") pod \"console-operator-58897d9998-k9fxc\" (UID: \"5364235f-88dd-4a0b-a055-5c075fbdff13\") " pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069784 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4h9bp\" (UniqueName: \"kubernetes.io/projected/9a78982d-f026-44c1-a2d2-ec9caa99331c-kube-api-access-4h9bp\") pod \"downloads-7954f5f757-k5zr9\" (UID: \"9a78982d-f026-44c1-a2d2-ec9caa99331c\") " pod="openshift-console/downloads-7954f5f757-k5zr9" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069800 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6zfn\" (UniqueName: \"kubernetes.io/projected/c2334157-de9a-47fc-8dd1-9388ba35334a-kube-api-access-n6zfn\") pod \"openshift-config-operator-7777fb866f-zg2km\" (UID: \"c2334157-de9a-47fc-8dd1-9388ba35334a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069835 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5364235f-88dd-4a0b-a055-5c075fbdff13-trusted-ca\") pod \"console-operator-58897d9998-k9fxc\" (UID: \"5364235f-88dd-4a0b-a055-5c075fbdff13\") " pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069855 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5364235f-88dd-4a0b-a055-5c075fbdff13-serving-cert\") pod \"console-operator-58897d9998-k9fxc\" (UID: \"5364235f-88dd-4a0b-a055-5c075fbdff13\") " pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069868 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-serving-cert\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069888 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-oauth-serving-cert\") pod \"console-f9d7485db-rl6xd\" (UID: 
\"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069904 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/c2334157-de9a-47fc-8dd1-9388ba35334a-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zg2km\" (UID: \"c2334157-de9a-47fc-8dd1-9388ba35334a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069938 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/62a22e0b-f684-4d68-90d4-667e660287cb-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-q6v7w\" (UID: \"62a22e0b-f684-4d68-90d4-667e660287cb\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.069973 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-config\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.070002 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-service-ca\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.070704 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-service-ca\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.072792 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-rgbsk"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.074599 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/c2334157-de9a-47fc-8dd1-9388ba35334a-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zg2km\" (UID: \"c2334157-de9a-47fc-8dd1-9388ba35334a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.074837 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-8rr8d"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.075067 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-oauth-config\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.075083 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-oauth-serving-cert\") pod 
\"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.075245 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.075489 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.075684 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-rgbsk"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.075840 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-8rr8d"] Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.075848 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-8rr8d" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.075972 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c2334157-de9a-47fc-8dd1-9388ba35334a-serving-cert\") pod \"openshift-config-operator-7777fb866f-zg2km\" (UID: \"c2334157-de9a-47fc-8dd1-9388ba35334a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.076032 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-config\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.076886 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-serving-cert\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.082263 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-trusted-ca-bundle\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.089703 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.109148 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.117723 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/62a22e0b-f684-4d68-90d4-667e660287cb-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-q6v7w\" (UID: \"62a22e0b-f684-4d68-90d4-667e660287cb\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.129143 4938 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.170442 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.189837 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.210398 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.230259 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.234702 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5364235f-88dd-4a0b-a055-5c075fbdff13-config\") pod \"console-operator-58897d9998-k9fxc\" (UID: \"5364235f-88dd-4a0b-a055-5c075fbdff13\") " pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.257885 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.265089 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5364235f-88dd-4a0b-a055-5c075fbdff13-trusted-ca\") pod \"console-operator-58897d9998-k9fxc\" (UID: \"5364235f-88dd-4a0b-a055-5c075fbdff13\") " pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.269787 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.276501 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5364235f-88dd-4a0b-a055-5c075fbdff13-serving-cert\") pod \"console-operator-58897d9998-k9fxc\" (UID: \"5364235f-88dd-4a0b-a055-5c075fbdff13\") " pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.290441 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.311927 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.330086 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.349632 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.370204 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.389967 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.430533 4938 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.449527 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.469580 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.490467 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.509686 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.529360 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.549519 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.569975 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.589998 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.609591 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.629511 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.649989 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.669250 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.689277 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.709228 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.729218 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.750004 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.770180 4938 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.790084 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.810180 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.830558 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.849263 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.870424 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.890220 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.909949 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.929615 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.950057 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967049 4938 configmap.go:193] Couldn't get configMap openshift-machine-api/machine-api-operator-images: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967165 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-images podName:44da01b3-b33a-402b-9bc1-ceea816d801b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.467140415 +0000 UTC m=+154.934977824 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-images") pod "machine-api-operator-5694c8668f-rqzh7" (UID: "44da01b3-b33a-402b-9bc1-ceea816d801b") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967181 4938 configmap.go:193] Couldn't get configMap openshift-cluster-machine-approver/kube-rbac-proxy: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967229 4938 configmap.go:193] Couldn't get configMap openshift-oauth-apiserver/etcd-serving-ca: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967241 4938 secret.go:188] Couldn't get secret openshift-machine-api/machine-api-operator-tls: failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967309 4938 secret.go:188] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967350 4938 configmap.go:193] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967400 4938 secret.go:188] Couldn't get secret openshift-apiserver/etcd-client: failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967402 4938 configmap.go:193] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967278 4938 configmap.go:193] Couldn't get configMap openshift-controller-manager/openshift-global-ca: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967483 4938 configmap.go:193] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967493 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-serving-ca podName:2de462e6-eb84-4e7e-904e-5d303e8ffc17 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.467285879 +0000 UTC m=+154.935123318 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-serving-ca") pod "apiserver-7bbb656c7d-999ph" (UID: "2de462e6-eb84-4e7e-904e-5d303e8ffc17") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967536 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-proxy-ca-bundles podName:d90bb3f2-72ce-41fa-b865-8892a4b70c06 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.467511595 +0000 UTC m=+154.935349014 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "proxy-ca-bundles" (UniqueName: "kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-proxy-ca-bundles") pod "controller-manager-879f6c89f-88scl" (UID: "d90bb3f2-72ce-41fa-b865-8892a4b70c06") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967588 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/44da01b3-b33a-402b-9bc1-ceea816d801b-machine-api-operator-tls podName:44da01b3-b33a-402b-9bc1-ceea816d801b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.467549136 +0000 UTC m=+154.935386545 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/44da01b3-b33a-402b-9bc1-ceea816d801b-machine-api-operator-tls") pod "machine-api-operator-5694c8668f-rqzh7" (UID: "44da01b3-b33a-402b-9bc1-ceea816d801b") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967609 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-encryption-config podName:2de462e6-eb84-4e7e-904e-5d303e8ffc17 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.467600107 +0000 UTC m=+154.935437516 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-encryption-config") pod "apiserver-7bbb656c7d-999ph" (UID: "2de462e6-eb84-4e7e-904e-5d303e8ffc17") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967624 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-service-ca-bundle podName:a0ba3027-3c7b-479f-be9f-ac471151ec8a nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.467617108 +0000 UTC m=+154.935454517 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-service-ca-bundle") pod "authentication-operator-69f744f599-xb74c" (UID: "a0ba3027-3c7b-479f-be9f-ac471151ec8a") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967644 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-client podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.467631948 +0000 UTC m=+154.935469357 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-client") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967669 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-trusted-ca-bundle podName:a0ba3027-3c7b-479f-be9f-ac471151ec8a nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.467661279 +0000 UTC m=+154.935498688 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-trusted-ca-bundle") pod "authentication-operator-69f744f599-xb74c" (UID: "a0ba3027-3c7b-479f-be9f-ac471151ec8a") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967684 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-config podName:a0ba3027-3c7b-479f-be9f-ac471151ec8a nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.467677049 +0000 UTC m=+154.935514468 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-config") pod "authentication-operator-69f744f599-xb74c" (UID: "a0ba3027-3c7b-479f-be9f-ac471151ec8a") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.967713 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-auth-proxy-config podName:2c5a6974-f26d-422a-9fb9-dcc8968fb1f2 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.46769272 +0000 UTC m=+154.935530129 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "auth-proxy-config" (UniqueName: "kubernetes.io/configmap/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-auth-proxy-config") pod "machine-approver-56656f9798-lhxtg" (UID: "2c5a6974-f26d-422a-9fb9-dcc8968fb1f2") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968598 4938 secret.go:188] Couldn't get secret openshift-oauth-apiserver/etcd-client: failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968669 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-client podName:2de462e6-eb84-4e7e-904e-5d303e8ffc17 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.468650365 +0000 UTC m=+154.936487874 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-client") pod "apiserver-7bbb656c7d-999ph" (UID: "2de462e6-eb84-4e7e-904e-5d303e8ffc17") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.968696 4938 request.go:700] Waited for 1.009452295s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/secrets?fieldSelector=metadata.name%3Dserving-cert&limit=500&resourceVersion=0 Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968733 4938 configmap.go:193] Couldn't get configMap openshift-oauth-apiserver/audit-1: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968754 4938 configmap.go:193] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968778 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-audit-policies podName:2de462e6-eb84-4e7e-904e-5d303e8ffc17 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.468765408 +0000 UTC m=+154.936602937 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-audit-policies") pod "apiserver-7bbb656c7d-999ph" (UID: "2de462e6-eb84-4e7e-904e-5d303e8ffc17") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968821 4938 configmap.go:193] Couldn't get configMap openshift-apiserver/image-import-ca: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968845 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/64767c08-8bde-4744-b0dd-e1629fd6e349-config podName:64767c08-8bde-4744-b0dd-e1629fd6e349 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.468817039 +0000 UTC m=+154.936654468 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/64767c08-8bde-4744-b0dd-e1629fd6e349-config") pod "openshift-apiserver-operator-796bbdcf4f-qskvn" (UID: "64767c08-8bde-4744-b0dd-e1629fd6e349") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968875 4938 configmap.go:193] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968937 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-trusted-ca-bundle podName:2de462e6-eb84-4e7e-904e-5d303e8ffc17 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.468897991 +0000 UTC m=+154.936735530 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-trusted-ca-bundle") pod "apiserver-7bbb656c7d-999ph" (UID: "2de462e6-eb84-4e7e-904e-5d303e8ffc17") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968944 4938 configmap.go:193] Couldn't get configMap openshift-apiserver/audit-1: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968954 4938 secret.go:188] Couldn't get secret openshift-apiserver/encryption-config-1: failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968966 4938 secret.go:188] Couldn't get secret openshift-authentication-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.968996 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-audit podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.468982184 +0000 UTC m=+154.936819613 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-audit") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969017 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0ba3027-3c7b-479f-be9f-ac471151ec8a-serving-cert podName:a0ba3027-3c7b-479f-be9f-ac471151ec8a nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469005974 +0000 UTC m=+154.936843513 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/a0ba3027-3c7b-479f-be9f-ac471151ec8a-serving-cert") pod "authentication-operator-69f744f599-xb74c" (UID: "a0ba3027-3c7b-479f-be9f-ac471151ec8a") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969038 4938 configmap.go:193] Couldn't get configMap openshift-controller-manager/client-ca: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969056 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-encryption-config podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469029285 +0000 UTC m=+154.936866794 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-encryption-config") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969057 4938 configmap.go:193] Couldn't get configMap openshift-apiserver/etcd-serving-ca: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969089 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-client-ca podName:d90bb3f2-72ce-41fa-b865-8892a4b70c06 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469072926 +0000 UTC m=+154.936910455 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-client-ca") pod "controller-manager-879f6c89f-88scl" (UID: "d90bb3f2-72ce-41fa-b865-8892a4b70c06") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969120 4938 configmap.go:193] Couldn't get configMap openshift-apiserver/config: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969092 4938 secret.go:188] Couldn't get secret openshift-cluster-machine-approver/machine-approver-tls: failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969126 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-image-import-ca podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469105607 +0000 UTC m=+154.936943136 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-image-import-ca") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969187 4938 configmap.go:193] Couldn't get configMap openshift-controller-manager/config: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969204 4938 configmap.go:193] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969190 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-serving-ca podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469174919 +0000 UTC m=+154.937012428 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-serving-ca") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969286 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-config podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469259691 +0000 UTC m=+154.937097160 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-config") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969321 4938 secret.go:188] Couldn't get secret openshift-apiserver/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969319 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-machine-approver-tls podName:2c5a6974-f26d-422a-9fb9-dcc8968fb1f2 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469301272 +0000 UTC m=+154.937138791 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "machine-approver-tls" (UniqueName: "kubernetes.io/secret/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-machine-approver-tls") pod "machine-approver-56656f9798-lhxtg" (UID: "2c5a6974-f26d-422a-9fb9-dcc8968fb1f2") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969364 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-config podName:d90bb3f2-72ce-41fa-b865-8892a4b70c06 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469348903 +0000 UTC m=+154.937186412 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-config") pod "controller-manager-879f6c89f-88scl" (UID: "d90bb3f2-72ce-41fa-b865-8892a4b70c06") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969377 4938 secret.go:188] Couldn't get secret openshift-controller-manager/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969397 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-trusted-ca-bundle podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469380184 +0000 UTC m=+154.937217793 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-trusted-ca-bundle") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969428 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-serving-cert podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469412565 +0000 UTC m=+154.937250094 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-serving-cert") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969435 4938 configmap.go:193] Couldn't get configMap openshift-cluster-machine-approver/machine-approver-config: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969460 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d90bb3f2-72ce-41fa-b865-8892a4b70c06-serving-cert podName:d90bb3f2-72ce-41fa-b865-8892a4b70c06 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469444326 +0000 UTC m=+154.937281825 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d90bb3f2-72ce-41fa-b865-8892a4b70c06-serving-cert") pod "controller-manager-879f6c89f-88scl" (UID: "d90bb3f2-72ce-41fa-b865-8892a4b70c06") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969489 4938 configmap.go:193] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969494 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-config podName:2c5a6974-f26d-422a-9fb9-dcc8968fb1f2 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469476307 +0000 UTC m=+154.937313826 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-config") pod "machine-approver-56656f9798-lhxtg" (UID: "2c5a6974-f26d-422a-9fb9-dcc8968fb1f2") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969526 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-config podName:44da01b3-b33a-402b-9bc1-ceea816d801b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469514308 +0000 UTC m=+154.937351817 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-config") pod "machine-api-operator-5694c8668f-rqzh7" (UID: "44da01b3-b33a-402b-9bc1-ceea816d801b") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969642 4938 secret.go:188] Couldn't get secret openshift-oauth-apiserver/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969714 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-serving-cert podName:2de462e6-eb84-4e7e-904e-5d303e8ffc17 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469694002 +0000 UTC m=+154.937531491 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-serving-cert") pod "apiserver-7bbb656c7d-999ph" (UID: "2de462e6-eb84-4e7e-904e-5d303e8ffc17") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969724 4938 secret.go:188] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: E1122 10:40:21.969786 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64767c08-8bde-4744-b0dd-e1629fd6e349-serving-cert podName:64767c08-8bde-4744-b0dd-e1629fd6e349 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.469767014 +0000 UTC m=+154.937604513 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/64767c08-8bde-4744-b0dd-e1629fd6e349-serving-cert") pod "openshift-apiserver-operator-796bbdcf4f-qskvn" (UID: "64767c08-8bde-4744-b0dd-e1629fd6e349") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.970028 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 22 10:40:21 crc kubenswrapper[4938]: I1122 10:40:21.990642 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.009816 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.030202 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.049709 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.070669 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.091038 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.110612 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.170283 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.190287 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.210153 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.329657 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.349598 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.370098 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.389973 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.409815 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.429699 4938 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.451079 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.470171 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487341 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-trusted-ca-bundle\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487408 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-serving-ca\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487442 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64767c08-8bde-4744-b0dd-e1629fd6e349-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487480 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-machine-approver-tls\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487501 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-config\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487524 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-config\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487557 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-client-ca\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487589 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: 
\"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-audit\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487606 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d90bb3f2-72ce-41fa-b865-8892a4b70c06-serving-cert\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487641 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-config\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487674 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-config\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487708 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-serving-cert\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487732 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/64767c08-8bde-4744-b0dd-e1629fd6e349-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487753 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-serving-cert\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487776 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-images\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487803 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487828 4938 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-auth-proxy-config\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487859 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-encryption-config\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487884 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-service-ca-bundle\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487958 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/44da01b3-b33a-402b-9bc1-ceea816d801b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.487993 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-client\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.488019 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.488046 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.488076 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-config\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.488099 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-image-import-ca\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.488123 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.488170 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-audit-policies\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.488194 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0ba3027-3c7b-479f-be9f-ac471151ec8a-serving-cert\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.488223 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-client\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.488257 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-encryption-config\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.489575 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.513004 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.529741 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.550038 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.551135 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.570393 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.589834 4938 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.610660 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.629540 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.655424 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.670284 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.689538 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.709948 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.730872 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.750556 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.770025 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.791495 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.811284 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.830066 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.850598 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.869954 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.910643 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqxmh\" (UniqueName: \"kubernetes.io/projected/4461eec4-354b-417f-b8ae-24e3deed3a5a-kube-api-access-pqxmh\") pod \"console-f9d7485db-rl6xd\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.939821 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4svm\" (UniqueName: \"kubernetes.io/projected/5364235f-88dd-4a0b-a055-5c075fbdff13-kube-api-access-m4svm\") pod \"console-operator-58897d9998-k9fxc\" (UID: \"5364235f-88dd-4a0b-a055-5c075fbdff13\") " pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.954556 4938 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gbnfs\" (UniqueName: \"kubernetes.io/projected/62a22e0b-f684-4d68-90d4-667e660287cb-kube-api-access-gbnfs\") pod \"cluster-samples-operator-665b6dd947-q6v7w\" (UID: \"62a22e0b-f684-4d68-90d4-667e660287cb\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.967038 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4h9bp\" (UniqueName: \"kubernetes.io/projected/9a78982d-f026-44c1-a2d2-ec9caa99331c-kube-api-access-4h9bp\") pod \"downloads-7954f5f757-k5zr9\" (UID: \"9a78982d-f026-44c1-a2d2-ec9caa99331c\") " pod="openshift-console/downloads-7954f5f757-k5zr9" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.983574 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6zfn\" (UniqueName: \"kubernetes.io/projected/c2334157-de9a-47fc-8dd1-9388ba35334a-kube-api-access-n6zfn\") pod \"openshift-config-operator-7777fb866f-zg2km\" (UID: \"c2334157-de9a-47fc-8dd1-9388ba35334a\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.988420 4938 request.go:700] Waited for 1.912969927s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/hostpath-provisioner/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Nov 22 10:40:22 crc kubenswrapper[4938]: I1122 10:40:22.990246 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.010749 4938 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.029847 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.049986 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.069489 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.082476 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.090267 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.109976 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.125946 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-k5zr9" Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.139867 4938 projected.go:288] Couldn't get configMap openshift-apiserver/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.161957 4938 projected.go:288] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.170479 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.192406 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.195697 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/95e28667-9965-4f22-919f-38b1904bd4b2-etcd-client\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.195749 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.195784 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/95e28667-9965-4f22-919f-38b1904bd4b2-etcd-service-ca\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.195885 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/24077b88-8b12-4015-9a87-00af9c32212c-bound-sa-token\") pod \"ingress-operator-5b745b69d9-z795b\" (UID: \"24077b88-8b12-4015-9a87-00af9c32212c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.195974 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196007 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-client-ca\") pod \"route-controller-manager-6576b87f9c-gw96j\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 
10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196094 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rt9k4\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-kube-api-access-rt9k4\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196137 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/95e28667-9965-4f22-919f-38b1904bd4b2-serving-cert\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196165 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d1b3eec7-79a6-4f38-b8a2-e2d1741c1479-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5m6z6\" (UID: \"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196190 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bdb9\" (UniqueName: \"kubernetes.io/projected/e7a859c1-d7af-4cea-aad2-c2ebb92e9d16-kube-api-access-4bdb9\") pod \"dns-operator-744455d44c-29bgm\" (UID: \"e7a859c1-d7af-4cea-aad2-c2ebb92e9d16\") " pod="openshift-dns-operator/dns-operator-744455d44c-29bgm" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196265 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f214e86f-2781-4e64-bef7-118417786b14-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjq2z\" (UID: \"f214e86f-2781-4e64-bef7-118417786b14\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196320 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xczg7\" (UniqueName: \"kubernetes.io/projected/95e28667-9965-4f22-919f-38b1904bd4b2-kube-api-access-xczg7\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196343 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196440 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f214e86f-2781-4e64-bef7-118417786b14-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjq2z\" (UID: \"f214e86f-2781-4e64-bef7-118417786b14\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196481 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-bound-sa-token\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196506 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ed60c2c-4a2d-4e79-a066-e76586fe7add-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-l4rgm\" (UID: \"3ed60c2c-4a2d-4e79-a066-e76586fe7add\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196525 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/24077b88-8b12-4015-9a87-00af9c32212c-trusted-ca\") pod \"ingress-operator-5b745b69d9-z795b\" (UID: \"24077b88-8b12-4015-9a87-00af9c32212c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196545 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/24077b88-8b12-4015-9a87-00af9c32212c-metrics-tls\") pod \"ingress-operator-5b745b69d9-z795b\" (UID: \"24077b88-8b12-4015-9a87-00af9c32212c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196568 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ed60c2c-4a2d-4e79-a066-e76586fe7add-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-l4rgm\" (UID: \"3ed60c2c-4a2d-4e79-a066-e76586fe7add\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196636 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196733 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cf5d758-b959-49f9-8e98-6f84ef428081-trusted-ca\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196766 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-login\") 
pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196826 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7cf5d758-b959-49f9-8e98-6f84ef428081-ca-trust-extracted\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196851 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e7a859c1-d7af-4cea-aad2-c2ebb92e9d16-metrics-tls\") pod \"dns-operator-744455d44c-29bgm\" (UID: \"e7a859c1-d7af-4cea-aad2-c2ebb92e9d16\") " pod="openshift-dns-operator/dns-operator-744455d44c-29bgm" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196876 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d1b3eec7-79a6-4f38-b8a2-e2d1741c1479-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5m6z6\" (UID: \"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.196954 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-registry-tls\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197091 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197124 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d56pm\" (UniqueName: \"kubernetes.io/projected/f214e86f-2781-4e64-bef7-118417786b14-kube-api-access-d56pm\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjq2z\" (UID: \"f214e86f-2781-4e64-bef7-118417786b14\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197145 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d9b35c60-eb15-4473-9098-b44308dd3926-audit-dir\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197187 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: 
\"kubernetes.io/configmap/95e28667-9965-4f22-919f-38b1904bd4b2-etcd-ca\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197213 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdtlw\" (UniqueName: \"kubernetes.io/projected/fa995a8f-e92f-45fa-8926-73cea902f283-kube-api-access-gdtlw\") pod \"control-plane-machine-set-operator-78cbb6b69f-6xczv\" (UID: \"fa995a8f-e92f-45fa-8926-73cea902f283\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197258 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95e28667-9965-4f22-919f-38b1904bd4b2-config\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197281 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/fa995a8f-e92f-45fa-8926-73cea902f283-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-6xczv\" (UID: \"fa995a8f-e92f-45fa-8926-73cea902f283\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197363 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197417 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqf98\" (UniqueName: \"kubernetes.io/projected/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-kube-api-access-gqf98\") pod \"route-controller-manager-6576b87f9c-gw96j\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197483 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-audit-policies\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197529 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197559 4938 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ed60c2c-4a2d-4e79-a066-e76586fe7add-config\") pod \"kube-controller-manager-operator-78b949d7b-l4rgm\" (UID: \"3ed60c2c-4a2d-4e79-a066-e76586fe7add\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197616 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-serving-cert\") pod \"route-controller-manager-6576b87f9c-gw96j\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.197677 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/7cf5d758-b959-49f9-8e98-6f84ef428081-registry-certificates\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.198023 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-config\") pod \"route-controller-manager-6576b87f9c-gw96j\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.198077 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7cf5d758-b959-49f9-8e98-6f84ef428081-installation-pull-secrets\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.198101 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kw9b\" (UniqueName: \"kubernetes.io/projected/d1b3eec7-79a6-4f38-b8a2-e2d1741c1479-kube-api-access-7kw9b\") pod \"cluster-image-registry-operator-dc59b4c8b-5m6z6\" (UID: \"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.198124 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.198153 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.198200 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.198224 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jdpz\" (UniqueName: \"kubernetes.io/projected/24077b88-8b12-4015-9a87-00af9c32212c-kube-api-access-8jdpz\") pod \"ingress-operator-5b745b69d9-z795b\" (UID: \"24077b88-8b12-4015-9a87-00af9c32212c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.198247 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6q8c\" (UniqueName: \"kubernetes.io/projected/d9b35c60-eb15-4473-9098-b44308dd3926-kube-api-access-d6q8c\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.198282 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d1b3eec7-79a6-4f38-b8a2-e2d1741c1479-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5m6z6\" (UID: \"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.198308 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.199077 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:23.699033496 +0000 UTC m=+156.166871005 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.204111 4938 projected.go:194] Error preparing data for projected volume kube-api-access-bwvk7 for pod openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.204197 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/64767c08-8bde-4744-b0dd-e1629fd6e349-kube-api-access-bwvk7 podName:64767c08-8bde-4744-b0dd-e1629fd6e349 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:23.704174961 +0000 UTC m=+156.172012440 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-bwvk7" (UniqueName: "kubernetes.io/projected/64767c08-8bde-4744-b0dd-e1629fd6e349-kube-api-access-bwvk7") pod "openshift-apiserver-operator-796bbdcf4f-qskvn" (UID: "64767c08-8bde-4744-b0dd-e1629fd6e349") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.204382 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.207510 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.211642 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.215937 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.219580 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-config\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.230415 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.240824 4938 projected.go:288] Couldn't get configMap openshift-machine-api/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.241545 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-auth-proxy-config\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.249781 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.258115 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zg2km"] Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.258432 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-config\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.260030 4938 projected.go:288] Couldn't get configMap openshift-cluster-machine-approver/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.272228 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.280994 4938 projected.go:288] Couldn't get configMap openshift-oauth-apiserver/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: W1122 10:40:23.287422 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2334157_de9a_47fc_8dd1_9388ba35334a.slice/crio-2fc585681731c9ef97e025fe552710f4d671fccc98c78afc8d109cabb355f564 WatchSource:0}: Error finding container 2fc585681731c9ef97e025fe552710f4d671fccc98c78afc8d109cabb355f564: Status 404 returned error can't find the container with id 2fc585681731c9ef97e025fe552710f4d671fccc98c78afc8d109cabb355f564 Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.290793 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.298873 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.299074 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/311f1063-1ead-4575-adce-cbf298b713b0-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-jg5zd\" (UID: \"311f1063-1ead-4575-adce-cbf298b713b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.299143 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:23.799114136 +0000 UTC m=+156.266951535 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.299425 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1c0c610d-d8dd-4aa7-a313-39685144ce31-auth-proxy-config\") pod \"machine-config-operator-74547568cd-97pbg\" (UID: \"1c0c610d-d8dd-4aa7-a313-39685144ce31\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.299612 4938 projected.go:288] Couldn't get configMap openshift-authentication-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.299692 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-serving-cert\") pod \"route-controller-manager-6576b87f9c-gw96j\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.299733 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c89f2e6-7048-4496-9cb9-07320fb586c6-config\") pod \"service-ca-operator-777779d784-wsz8t\" (UID: \"2c89f2e6-7048-4496-9cb9-07320fb586c6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.299755 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/648f33c1-89ac-4734-af21-403c7270db09-cert\") pod \"ingress-canary-8rr8d\" (UID: \"648f33c1-89ac-4734-af21-403c7270db09\") " pod="openshift-ingress-canary/ingress-canary-8rr8d" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.299777 4938 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kw9b\" (UniqueName: \"kubernetes.io/projected/d1b3eec7-79a6-4f38-b8a2-e2d1741c1479-kube-api-access-7kw9b\") pod \"cluster-image-registry-operator-dc59b4c8b-5m6z6\" (UID: \"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.299792 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.299810 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2e471541-c937-41e6-9dd8-a32b167f8adf-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g9gg8\" (UID: \"2e471541-c937-41e6-9dd8-a32b167f8adf\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.299828 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6q8c\" (UniqueName: \"kubernetes.io/projected/d9b35c60-eb15-4473-9098-b44308dd3926-kube-api-access-d6q8c\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.299844 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.299859 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/d0d89759-92ad-4ff1-8ab9-7da4338c7148-tmpfs\") pod \"packageserver-d55dfcdfc-lr8gp\" (UID: \"d0d89759-92ad-4ff1-8ab9-7da4338c7148\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.299892 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/24077b88-8b12-4015-9a87-00af9c32212c-bound-sa-token\") pod \"ingress-operator-5b745b69d9-z795b\" (UID: \"24077b88-8b12-4015-9a87-00af9c32212c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.301850 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/95e28667-9965-4f22-919f-38b1904bd4b2-etcd-service-ca\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.302186 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d90bb3f2-72ce-41fa-b865-8892a4b70c06-serving-cert\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.302583 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/95e28667-9965-4f22-919f-38b1904bd4b2-etcd-service-ca\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303590 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d0d89759-92ad-4ff1-8ab9-7da4338c7148-webhook-cert\") pod \"packageserver-d55dfcdfc-lr8gp\" (UID: \"d0d89759-92ad-4ff1-8ab9-7da4338c7148\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303618 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xh4k4\" (UniqueName: \"kubernetes.io/projected/f4d7dc9c-3f3a-4114-ae32-b7963c7b4908-kube-api-access-xh4k4\") pod \"migrator-59844c95c7-p4glz\" (UID: \"f4d7dc9c-3f3a-4114-ae32-b7963c7b4908\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p4glz" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303638 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-service-ca-bundle\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303685 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/825e96a5-8bdd-425f-9d92-461eded30ae3-config-volume\") pod \"dns-default-dd667\" (UID: \"825e96a5-8bdd-425f-9d92-461eded30ae3\") " pod="openshift-dns/dns-default-dd667" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303731 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/95e28667-9965-4f22-919f-38b1904bd4b2-serving-cert\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303766 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f214e86f-2781-4e64-bef7-118417786b14-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjq2z\" (UID: \"f214e86f-2781-4e64-bef7-118417786b14\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303784 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: 
\"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303813 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skhvg\" (UniqueName: \"kubernetes.io/projected/d0d89759-92ad-4ff1-8ab9-7da4338c7148-kube-api-access-skhvg\") pod \"packageserver-d55dfcdfc-lr8gp\" (UID: \"d0d89759-92ad-4ff1-8ab9-7da4338c7148\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303828 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f214e86f-2781-4e64-bef7-118417786b14-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjq2z\" (UID: \"f214e86f-2781-4e64-bef7-118417786b14\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303844 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/24077b88-8b12-4015-9a87-00af9c32212c-trusted-ca\") pod \"ingress-operator-5b745b69d9-z795b\" (UID: \"24077b88-8b12-4015-9a87-00af9c32212c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303869 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/24077b88-8b12-4015-9a87-00af9c32212c-metrics-tls\") pod \"ingress-operator-5b745b69d9-z795b\" (UID: \"24077b88-8b12-4015-9a87-00af9c32212c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303886 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ed60c2c-4a2d-4e79-a066-e76586fe7add-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-l4rgm\" (UID: \"3ed60c2c-4a2d-4e79-a066-e76586fe7add\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303902 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/fd877f2f-683c-4734-bd35-df9891a505df-signing-cabundle\") pod \"service-ca-9c57cc56f-vst8c\" (UID: \"fd877f2f-683c-4734-bd35-df9891a505df\") " pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303942 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f2634903-a7f8-4114-b3d3-f902eb3df5ee-profile-collector-cert\") pod \"olm-operator-6b444d44fb-7mhhs\" (UID: \"f2634903-a7f8-4114-b3d3-f902eb3df5ee\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303958 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtzgm\" (UniqueName: \"kubernetes.io/projected/7885daef-8e8d-40a2-8530-7353819a1386-kube-api-access-jtzgm\") pod \"machine-config-server-sqh6q\" (UID: \"7885daef-8e8d-40a2-8530-7353819a1386\") " 
pod="openshift-machine-config-operator/machine-config-server-sqh6q" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.303988 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304013 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7cf5d758-b959-49f9-8e98-6f84ef428081-ca-trust-extracted\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304032 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e7a859c1-d7af-4cea-aad2-c2ebb92e9d16-metrics-tls\") pod \"dns-operator-744455d44c-29bgm\" (UID: \"e7a859c1-d7af-4cea-aad2-c2ebb92e9d16\") " pod="openshift-dns-operator/dns-operator-744455d44c-29bgm" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304053 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c94zz\" (UniqueName: \"kubernetes.io/projected/dcb746ed-5e3a-4c9e-9416-60a033f9035d-kube-api-access-c94zz\") pod \"package-server-manager-789f6589d5-4t6l6\" (UID: \"dcb746ed-5e3a-4c9e-9416-60a033f9035d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304075 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tzmb\" (UniqueName: \"kubernetes.io/projected/000e79cf-31f1-47f0-974c-4918f468ca74-kube-api-access-6tzmb\") pod \"machine-config-controller-84d6567774-k7dtf\" (UID: \"000e79cf-31f1-47f0-974c-4918f468ca74\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304096 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-registry-tls\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304117 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d1b3eec7-79a6-4f38-b8a2-e2d1741c1479-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5m6z6\" (UID: \"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304147 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-plugins-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 
10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304170 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vctm7\" (UniqueName: \"kubernetes.io/projected/fd877f2f-683c-4734-bd35-df9891a505df-kube-api-access-vctm7\") pod \"service-ca-9c57cc56f-vst8c\" (UID: \"fd877f2f-683c-4734-bd35-df9891a505df\") " pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304195 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/000e79cf-31f1-47f0-974c-4918f468ca74-proxy-tls\") pod \"machine-config-controller-84d6567774-k7dtf\" (UID: \"000e79cf-31f1-47f0-974c-4918f468ca74\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304232 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d56pm\" (UniqueName: \"kubernetes.io/projected/f214e86f-2781-4e64-bef7-118417786b14-kube-api-access-d56pm\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjq2z\" (UID: \"f214e86f-2781-4e64-bef7-118417786b14\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304253 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-socket-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304273 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1c0c610d-d8dd-4aa7-a313-39685144ce31-images\") pod \"machine-config-operator-74547568cd-97pbg\" (UID: \"1c0c610d-d8dd-4aa7-a313-39685144ce31\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304304 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdtlw\" (UniqueName: \"kubernetes.io/projected/fa995a8f-e92f-45fa-8926-73cea902f283-kube-api-access-gdtlw\") pod \"control-plane-machine-set-operator-78cbb6b69f-6xczv\" (UID: \"fa995a8f-e92f-45fa-8926-73cea902f283\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304330 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca3f486a-1534-437f-8b98-03f1304b4686-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-mh9ds\" (UID: \"ca3f486a-1534-437f-8b98-03f1304b4686\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-mh9ds" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304352 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/000e79cf-31f1-47f0-974c-4918f468ca74-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-k7dtf\" (UID: \"000e79cf-31f1-47f0-974c-4918f468ca74\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304371 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c89f2e6-7048-4496-9cb9-07320fb586c6-serving-cert\") pod \"service-ca-operator-777779d784-wsz8t\" (UID: \"2c89f2e6-7048-4496-9cb9-07320fb586c6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304394 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cb0610d9-7370-438c-a65a-db457f13d8be-profile-collector-cert\") pod \"catalog-operator-68c6474976-ggzxq\" (UID: \"cb0610d9-7370-438c-a65a-db457f13d8be\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304434 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-audit-policies\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304453 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1c0c610d-d8dd-4aa7-a313-39685144ce31-proxy-tls\") pod \"machine-config-operator-74547568cd-97pbg\" (UID: \"1c0c610d-d8dd-4aa7-a313-39685144ce31\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304472 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cb0610d9-7370-438c-a65a-db457f13d8be-srv-cert\") pod \"catalog-operator-68c6474976-ggzxq\" (UID: \"cb0610d9-7370-438c-a65a-db457f13d8be\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304494 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/825e96a5-8bdd-425f-9d92-461eded30ae3-metrics-tls\") pod \"dns-default-dd667\" (UID: \"825e96a5-8bdd-425f-9d92-461eded30ae3\") " pod="openshift-dns/dns-default-dd667" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304518 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304539 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ed60c2c-4a2d-4e79-a066-e76586fe7add-config\") pod \"kube-controller-manager-operator-78b949d7b-l4rgm\" (UID: \"3ed60c2c-4a2d-4e79-a066-e76586fe7add\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" Nov 22 10:40:23 crc 
kubenswrapper[4938]: I1122 10:40:23.304562 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/fd877f2f-683c-4734-bd35-df9891a505df-signing-key\") pod \"service-ca-9c57cc56f-vst8c\" (UID: \"fd877f2f-683c-4734-bd35-df9891a505df\") " pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304581 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-stats-auth\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304614 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/7cf5d758-b959-49f9-8e98-6f84ef428081-registry-certificates\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304635 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-config\") pod \"route-controller-manager-6576b87f9c-gw96j\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304666 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7cf5d758-b959-49f9-8e98-6f84ef428081-installation-pull-secrets\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304686 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304708 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d0d89759-92ad-4ff1-8ab9-7da4338c7148-apiservice-cert\") pod \"packageserver-d55dfcdfc-lr8gp\" (UID: \"d0d89759-92ad-4ff1-8ab9-7da4338c7148\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304736 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304757 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-8jdpz\" (UniqueName: \"kubernetes.io/projected/24077b88-8b12-4015-9a87-00af9c32212c-kube-api-access-8jdpz\") pod \"ingress-operator-5b745b69d9-z795b\" (UID: \"24077b88-8b12-4015-9a87-00af9c32212c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304780 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c908925-144c-4a75-a9c4-35c2b585db68-config\") pod \"kube-apiserver-operator-766d6c64bb-nghqj\" (UID: \"1c908925-144c-4a75-a9c4-35c2b585db68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304822 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c908925-144c-4a75-a9c4-35c2b585db68-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-nghqj\" (UID: \"1c908925-144c-4a75-a9c4-35c2b585db68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304858 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d1b3eec7-79a6-4f38-b8a2-e2d1741c1479-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5m6z6\" (UID: \"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304882 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/95e28667-9965-4f22-919f-38b1904bd4b2-etcd-client\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304903 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304945 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhz6d\" (UniqueName: \"kubernetes.io/projected/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-kube-api-access-rhz6d\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304963 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f214e86f-2781-4e64-bef7-118417786b14-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjq2z\" (UID: \"f214e86f-2781-4e64-bef7-118417786b14\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304976 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgfv4\" (UniqueName: 
\"kubernetes.io/projected/cb0610d9-7370-438c-a65a-db457f13d8be-kube-api-access-mgfv4\") pod \"catalog-operator-68c6474976-ggzxq\" (UID: \"cb0610d9-7370-438c-a65a-db457f13d8be\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.304998 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305021 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1c908925-144c-4a75-a9c4-35c2b585db68-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-nghqj\" (UID: \"1c908925-144c-4a75-a9c4-35c2b585db68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305041 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7885daef-8e8d-40a2-8530-7353819a1386-certs\") pod \"machine-config-server-sqh6q\" (UID: \"7885daef-8e8d-40a2-8530-7353819a1386\") " pod="openshift-machine-config-operator/machine-config-server-sqh6q" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305060 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbxjs\" (UniqueName: \"kubernetes.io/projected/1c0c610d-d8dd-4aa7-a313-39685144ce31-kube-api-access-bbxjs\") pod \"machine-config-operator-74547568cd-97pbg\" (UID: \"1c0c610d-d8dd-4aa7-a313-39685144ce31\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305081 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4b2cs\" (UniqueName: \"kubernetes.io/projected/ca3f486a-1534-437f-8b98-03f1304b4686-kube-api-access-4b2cs\") pod \"multus-admission-controller-857f4d67dd-mh9ds\" (UID: \"ca3f486a-1534-437f-8b98-03f1304b4686\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-mh9ds" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305105 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-client-ca\") pod \"route-controller-manager-6576b87f9c-gw96j\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305126 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-299rd\" (UniqueName: \"kubernetes.io/projected/f2634903-a7f8-4114-b3d3-f902eb3df5ee-kube-api-access-299rd\") pod \"olm-operator-6b444d44fb-7mhhs\" (UID: \"f2634903-a7f8-4114-b3d3-f902eb3df5ee\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305149 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-mountpoint-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305171 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-config-volume\") pod \"collect-profiles-29396790-q5d2n\" (UID: \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305195 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e471541-c937-41e6-9dd8-a32b167f8adf-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g9gg8\" (UID: \"2e471541-c937-41e6-9dd8-a32b167f8adf\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305242 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-csi-data-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305292 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rt9k4\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-kube-api-access-rt9k4\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305338 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d1b3eec7-79a6-4f38-b8a2-e2d1741c1479-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5m6z6\" (UID: \"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305363 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kmkc\" (UniqueName: \"kubernetes.io/projected/825e96a5-8bdd-425f-9d92-461eded30ae3-kube-api-access-5kmkc\") pod \"dns-default-dd667\" (UID: \"825e96a5-8bdd-425f-9d92-461eded30ae3\") " pod="openshift-dns/dns-default-dd667" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305387 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bdb9\" (UniqueName: \"kubernetes.io/projected/e7a859c1-d7af-4cea-aad2-c2ebb92e9d16-kube-api-access-4bdb9\") pod \"dns-operator-744455d44c-29bgm\" (UID: \"e7a859c1-d7af-4cea-aad2-c2ebb92e9d16\") " pod="openshift-dns-operator/dns-operator-744455d44c-29bgm" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305408 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xczg7\" (UniqueName: \"kubernetes.io/projected/95e28667-9965-4f22-919f-38b1904bd4b2-kube-api-access-xczg7\") pod \"etcd-operator-b45778765-6whgh\" (UID: 
\"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305498 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5wt7\" (UniqueName: \"kubernetes.io/projected/648f33c1-89ac-4734-af21-403c7270db09-kube-api-access-k5wt7\") pod \"ingress-canary-8rr8d\" (UID: \"648f33c1-89ac-4734-af21-403c7270db09\") " pod="openshift-ingress-canary/ingress-canary-8rr8d" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305513 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305527 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ed60c2c-4a2d-4e79-a066-e76586fe7add-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-l4rgm\" (UID: \"3ed60c2c-4a2d-4e79-a066-e76586fe7add\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305535 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305574 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7885daef-8e8d-40a2-8530-7353819a1386-node-bootstrap-token\") pod \"machine-config-server-sqh6q\" (UID: \"7885daef-8e8d-40a2-8530-7353819a1386\") " pod="openshift-machine-config-operator/machine-config-server-sqh6q" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305604 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-bound-sa-token\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305634 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfppm\" (UniqueName: \"kubernetes.io/projected/2c89f2e6-7048-4496-9cb9-07320fb586c6-kube-api-access-hfppm\") pod \"service-ca-operator-777779d784-wsz8t\" (UID: \"2c89f2e6-7048-4496-9cb9-07320fb586c6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305678 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") 
" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305704 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-secret-volume\") pod \"collect-profiles-29396790-q5d2n\" (UID: \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305727 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-default-certificate\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305754 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsff5\" (UniqueName: \"kubernetes.io/projected/87fd6986-2f7f-4c25-bb26-0016630d173c-kube-api-access-tsff5\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305786 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cf5d758-b959-49f9-8e98-6f84ef428081-trusted-ca\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305816 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3b1138e-f11b-478a-b955-7737ac63dc31-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-hs2zp\" (UID: \"f3b1138e-f11b-478a-b955-7737ac63dc31\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305838 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3b1138e-f11b-478a-b955-7737ac63dc31-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-hs2zp\" (UID: \"f3b1138e-f11b-478a-b955-7737ac63dc31\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305880 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/dcb746ed-5e3a-4c9e-9416-60a033f9035d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-4t6l6\" (UID: \"dcb746ed-5e3a-4c9e-9416-60a033f9035d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305969 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-metrics-certs\") pod \"router-default-5444994796-mgpzv\" (UID: 
\"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.305997 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f2634903-a7f8-4114-b3d3-f902eb3df5ee-srv-cert\") pod \"olm-operator-6b444d44fb-7mhhs\" (UID: \"f2634903-a7f8-4114-b3d3-f902eb3df5ee\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306017 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2e471541-c937-41e6-9dd8-a32b167f8adf-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g9gg8\" (UID: \"2e471541-c937-41e6-9dd8-a32b167f8adf\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306085 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d9b35c60-eb15-4473-9098-b44308dd3926-audit-dir\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306110 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306132 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/95e28667-9965-4f22-919f-38b1904bd4b2-etcd-ca\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306157 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg92v\" (UniqueName: \"kubernetes.io/projected/311f1063-1ead-4575-adce-cbf298b713b0-kube-api-access-qg92v\") pod \"marketplace-operator-79b997595-jg5zd\" (UID: \"311f1063-1ead-4575-adce-cbf298b713b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306190 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95e28667-9965-4f22-919f-38b1904bd4b2-config\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306213 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/fa995a8f-e92f-45fa-8926-73cea902f283-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-6xczv\" (UID: \"fa995a8f-e92f-45fa-8926-73cea902f283\") " 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306237 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc8c7\" (UniqueName: \"kubernetes.io/projected/f3b1138e-f11b-478a-b955-7737ac63dc31-kube-api-access-jc8c7\") pod \"kube-storage-version-migrator-operator-b67b599dd-hs2zp\" (UID: \"f3b1138e-f11b-478a-b955-7737ac63dc31\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306258 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/311f1063-1ead-4575-adce-cbf298b713b0-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-jg5zd\" (UID: \"311f1063-1ead-4575-adce-cbf298b713b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306289 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-registration-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306325 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306350 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqf98\" (UniqueName: \"kubernetes.io/projected/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-kube-api-access-gqf98\") pod \"route-controller-manager-6576b87f9c-gw96j\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306371 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spwl6\" (UniqueName: \"kubernetes.io/projected/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-kube-api-access-spwl6\") pod \"collect-profiles-29396790-q5d2n\" (UID: \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.306794 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/24077b88-8b12-4015-9a87-00af9c32212c-trusted-ca\") pod \"ingress-operator-5b745b69d9-z795b\" (UID: \"24077b88-8b12-4015-9a87-00af9c32212c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.307374 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/95e28667-9965-4f22-919f-38b1904bd4b2-etcd-ca\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.307834 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95e28667-9965-4f22-919f-38b1904bd4b2-config\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.308031 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.308483 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ed60c2c-4a2d-4e79-a066-e76586fe7add-config\") pod \"kube-controller-manager-operator-78b949d7b-l4rgm\" (UID: \"3ed60c2c-4a2d-4e79-a066-e76586fe7add\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.308968 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-config\") pod \"route-controller-manager-6576b87f9c-gw96j\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.309328 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-audit-policies\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.309724 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ed60c2c-4a2d-4e79-a066-e76586fe7add-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-l4rgm\" (UID: \"3ed60c2c-4a2d-4e79-a066-e76586fe7add\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.309891 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/7cf5d758-b959-49f9-8e98-6f84ef428081-registry-certificates\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.310671 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-serving-cert\") pod \"route-controller-manager-6576b87f9c-gw96j\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.310991 4938 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d9b35c60-eb15-4473-9098-b44308dd3926-audit-dir\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.311172 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d1b3eec7-79a6-4f38-b8a2-e2d1741c1479-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5m6z6\" (UID: \"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.311273 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7cf5d758-b959-49f9-8e98-6f84ef428081-ca-trust-extracted\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.311382 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f214e86f-2781-4e64-bef7-118417786b14-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjq2z\" (UID: \"f214e86f-2781-4e64-bef7-118417786b14\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.312662 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/fa995a8f-e92f-45fa-8926-73cea902f283-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-6xczv\" (UID: \"fa995a8f-e92f-45fa-8926-73cea902f283\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.313019 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/95e28667-9965-4f22-919f-38b1904bd4b2-etcd-client\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.313066 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cf5d758-b959-49f9-8e98-6f84ef428081-trusted-ca\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.313533 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-client-ca\") pod \"route-controller-manager-6576b87f9c-gw96j\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.313977 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/24077b88-8b12-4015-9a87-00af9c32212c-metrics-tls\") pod \"ingress-operator-5b745b69d9-z795b\" (UID: 
\"24077b88-8b12-4015-9a87-00af9c32212c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.314435 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.314487 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.314746 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.315317 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.317711 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e7a859c1-d7af-4cea-aad2-c2ebb92e9d16-metrics-tls\") pod \"dns-operator-744455d44c-29bgm\" (UID: \"e7a859c1-d7af-4cea-aad2-c2ebb92e9d16\") " pod="openshift-dns-operator/dns-operator-744455d44c-29bgm" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.319142 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-config\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.319372 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:23.819356488 +0000 UTC m=+156.287193967 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.320499 4938 projected.go:288] Couldn't get configMap openshift-controller-manager/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.320551 4938 projected.go:194] Error preparing data for projected volume kube-api-access-h8t6p for pod openshift-controller-manager/controller-manager-879f6c89f-88scl: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.320640 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/d90bb3f2-72ce-41fa-b865-8892a4b70c06-kube-api-access-h8t6p podName:d90bb3f2-72ce-41fa-b865-8892a4b70c06 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:23.820613291 +0000 UTC m=+156.288450740 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-h8t6p" (UniqueName: "kubernetes.io/projected/d90bb3f2-72ce-41fa-b865-8892a4b70c06-kube-api-access-h8t6p") pod "controller-manager-879f6c89f-88scl" (UID: "d90bb3f2-72ce-41fa-b865-8892a4b70c06") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.320755 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.320975 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7cf5d758-b959-49f9-8e98-6f84ef428081-installation-pull-secrets\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.321210 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.323411 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/95e28667-9965-4f22-919f-38b1904bd4b2-serving-cert\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.323466 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.326135 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.326736 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d1b3eec7-79a6-4f38-b8a2-e2d1741c1479-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5m6z6\" (UID: \"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.336322 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.340984 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-k5zr9"] Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.345844 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-encryption-config\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.354589 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-registry-tls\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.355295 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.356238 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.379366 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-client\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.387836 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 22 10:40:23 crc kubenswrapper[4938]: W1122 10:40:23.387998 4938 manager.go:1169] Failed to process 
watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a78982d_f026_44c1_a2d2_ec9caa99331c.slice/crio-63fc0f511b3fb18446c08a7b333ec91d3027e2beffc75e885708afb491f53d80 WatchSource:0}: Error finding container 63fc0f511b3fb18446c08a7b333ec91d3027e2beffc75e885708afb491f53d80: Status 404 returned error can't find the container with id 63fc0f511b3fb18446c08a7b333ec91d3027e2beffc75e885708afb491f53d80 Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.407638 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.407803 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/311f1063-1ead-4575-adce-cbf298b713b0-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-jg5zd\" (UID: \"311f1063-1ead-4575-adce-cbf298b713b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.407829 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1c0c610d-d8dd-4aa7-a313-39685144ce31-auth-proxy-config\") pod \"machine-config-operator-74547568cd-97pbg\" (UID: \"1c0c610d-d8dd-4aa7-a313-39685144ce31\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.407853 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c89f2e6-7048-4496-9cb9-07320fb586c6-config\") pod \"service-ca-operator-777779d784-wsz8t\" (UID: \"2c89f2e6-7048-4496-9cb9-07320fb586c6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.407871 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/648f33c1-89ac-4734-af21-403c7270db09-cert\") pod \"ingress-canary-8rr8d\" (UID: \"648f33c1-89ac-4734-af21-403c7270db09\") " pod="openshift-ingress-canary/ingress-canary-8rr8d" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.407893 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2e471541-c937-41e6-9dd8-a32b167f8adf-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g9gg8\" (UID: \"2e471541-c937-41e6-9dd8-a32b167f8adf\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.407936 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/d0d89759-92ad-4ff1-8ab9-7da4338c7148-tmpfs\") pod \"packageserver-d55dfcdfc-lr8gp\" (UID: \"d0d89759-92ad-4ff1-8ab9-7da4338c7148\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.407973 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/d0d89759-92ad-4ff1-8ab9-7da4338c7148-webhook-cert\") pod \"packageserver-d55dfcdfc-lr8gp\" (UID: \"d0d89759-92ad-4ff1-8ab9-7da4338c7148\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.407991 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xh4k4\" (UniqueName: \"kubernetes.io/projected/f4d7dc9c-3f3a-4114-ae32-b7963c7b4908-kube-api-access-xh4k4\") pod \"migrator-59844c95c7-p4glz\" (UID: \"f4d7dc9c-3f3a-4114-ae32-b7963c7b4908\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p4glz" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408009 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-service-ca-bundle\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408034 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/825e96a5-8bdd-425f-9d92-461eded30ae3-config-volume\") pod \"dns-default-dd667\" (UID: \"825e96a5-8bdd-425f-9d92-461eded30ae3\") " pod="openshift-dns/dns-default-dd667" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408053 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skhvg\" (UniqueName: \"kubernetes.io/projected/d0d89759-92ad-4ff1-8ab9-7da4338c7148-kube-api-access-skhvg\") pod \"packageserver-d55dfcdfc-lr8gp\" (UID: \"d0d89759-92ad-4ff1-8ab9-7da4338c7148\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408087 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/fd877f2f-683c-4734-bd35-df9891a505df-signing-cabundle\") pod \"service-ca-9c57cc56f-vst8c\" (UID: \"fd877f2f-683c-4734-bd35-df9891a505df\") " pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408118 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f2634903-a7f8-4114-b3d3-f902eb3df5ee-profile-collector-cert\") pod \"olm-operator-6b444d44fb-7mhhs\" (UID: \"f2634903-a7f8-4114-b3d3-f902eb3df5ee\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408321 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtzgm\" (UniqueName: \"kubernetes.io/projected/7885daef-8e8d-40a2-8530-7353819a1386-kube-api-access-jtzgm\") pod \"machine-config-server-sqh6q\" (UID: \"7885daef-8e8d-40a2-8530-7353819a1386\") " pod="openshift-machine-config-operator/machine-config-server-sqh6q" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408347 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c94zz\" (UniqueName: \"kubernetes.io/projected/dcb746ed-5e3a-4c9e-9416-60a033f9035d-kube-api-access-c94zz\") pod \"package-server-manager-789f6589d5-4t6l6\" (UID: \"dcb746ed-5e3a-4c9e-9416-60a033f9035d\") " 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408366 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tzmb\" (UniqueName: \"kubernetes.io/projected/000e79cf-31f1-47f0-974c-4918f468ca74-kube-api-access-6tzmb\") pod \"machine-config-controller-84d6567774-k7dtf\" (UID: \"000e79cf-31f1-47f0-974c-4918f468ca74\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408388 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-plugins-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408410 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vctm7\" (UniqueName: \"kubernetes.io/projected/fd877f2f-683c-4734-bd35-df9891a505df-kube-api-access-vctm7\") pod \"service-ca-9c57cc56f-vst8c\" (UID: \"fd877f2f-683c-4734-bd35-df9891a505df\") " pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408434 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/000e79cf-31f1-47f0-974c-4918f468ca74-proxy-tls\") pod \"machine-config-controller-84d6567774-k7dtf\" (UID: \"000e79cf-31f1-47f0-974c-4918f468ca74\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408457 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-socket-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408472 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1c0c610d-d8dd-4aa7-a313-39685144ce31-images\") pod \"machine-config-operator-74547568cd-97pbg\" (UID: \"1c0c610d-d8dd-4aa7-a313-39685144ce31\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408502 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca3f486a-1534-437f-8b98-03f1304b4686-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-mh9ds\" (UID: \"ca3f486a-1534-437f-8b98-03f1304b4686\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-mh9ds" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408519 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/000e79cf-31f1-47f0-974c-4918f468ca74-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-k7dtf\" (UID: \"000e79cf-31f1-47f0-974c-4918f468ca74\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408537 4938 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c89f2e6-7048-4496-9cb9-07320fb586c6-serving-cert\") pod \"service-ca-operator-777779d784-wsz8t\" (UID: \"2c89f2e6-7048-4496-9cb9-07320fb586c6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408558 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cb0610d9-7370-438c-a65a-db457f13d8be-profile-collector-cert\") pod \"catalog-operator-68c6474976-ggzxq\" (UID: \"cb0610d9-7370-438c-a65a-db457f13d8be\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408578 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1c0c610d-d8dd-4aa7-a313-39685144ce31-proxy-tls\") pod \"machine-config-operator-74547568cd-97pbg\" (UID: \"1c0c610d-d8dd-4aa7-a313-39685144ce31\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408593 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cb0610d9-7370-438c-a65a-db457f13d8be-srv-cert\") pod \"catalog-operator-68c6474976-ggzxq\" (UID: \"cb0610d9-7370-438c-a65a-db457f13d8be\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408608 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/825e96a5-8bdd-425f-9d92-461eded30ae3-metrics-tls\") pod \"dns-default-dd667\" (UID: \"825e96a5-8bdd-425f-9d92-461eded30ae3\") " pod="openshift-dns/dns-default-dd667" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408623 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/fd877f2f-683c-4734-bd35-df9891a505df-signing-key\") pod \"service-ca-9c57cc56f-vst8c\" (UID: \"fd877f2f-683c-4734-bd35-df9891a505df\") " pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408640 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-stats-auth\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408675 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d0d89759-92ad-4ff1-8ab9-7da4338c7148-apiservice-cert\") pod \"packageserver-d55dfcdfc-lr8gp\" (UID: \"d0d89759-92ad-4ff1-8ab9-7da4338c7148\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408712 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c908925-144c-4a75-a9c4-35c2b585db68-config\") pod \"kube-apiserver-operator-766d6c64bb-nghqj\" (UID: \"1c908925-144c-4a75-a9c4-35c2b585db68\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408751 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c908925-144c-4a75-a9c4-35c2b585db68-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-nghqj\" (UID: \"1c908925-144c-4a75-a9c4-35c2b585db68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408783 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhz6d\" (UniqueName: \"kubernetes.io/projected/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-kube-api-access-rhz6d\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408811 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgfv4\" (UniqueName: \"kubernetes.io/projected/cb0610d9-7370-438c-a65a-db457f13d8be-kube-api-access-mgfv4\") pod \"catalog-operator-68c6474976-ggzxq\" (UID: \"cb0610d9-7370-438c-a65a-db457f13d8be\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408842 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1c908925-144c-4a75-a9c4-35c2b585db68-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-nghqj\" (UID: \"1c908925-144c-4a75-a9c4-35c2b585db68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408869 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7885daef-8e8d-40a2-8530-7353819a1386-certs\") pod \"machine-config-server-sqh6q\" (UID: \"7885daef-8e8d-40a2-8530-7353819a1386\") " pod="openshift-machine-config-operator/machine-config-server-sqh6q" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.408898 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbxjs\" (UniqueName: \"kubernetes.io/projected/1c0c610d-d8dd-4aa7-a313-39685144ce31-kube-api-access-bbxjs\") pod \"machine-config-operator-74547568cd-97pbg\" (UID: \"1c0c610d-d8dd-4aa7-a313-39685144ce31\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.409341 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.409371 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-plugins-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.409415 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/d0d89759-92ad-4ff1-8ab9-7da4338c7148-tmpfs\") pod \"packageserver-d55dfcdfc-lr8gp\" (UID: \"d0d89759-92ad-4ff1-8ab9-7da4338c7148\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.409626 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:23.909607049 +0000 UTC m=+156.377444448 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.409702 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4b2cs\" (UniqueName: \"kubernetes.io/projected/ca3f486a-1534-437f-8b98-03f1304b4686-kube-api-access-4b2cs\") pod \"multus-admission-controller-857f4d67dd-mh9ds\" (UID: \"ca3f486a-1534-437f-8b98-03f1304b4686\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-mh9ds" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.409736 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-299rd\" (UniqueName: \"kubernetes.io/projected/f2634903-a7f8-4114-b3d3-f902eb3df5ee-kube-api-access-299rd\") pod \"olm-operator-6b444d44fb-7mhhs\" (UID: \"f2634903-a7f8-4114-b3d3-f902eb3df5ee\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.409753 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-mountpoint-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.409769 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-config-volume\") pod \"collect-profiles-29396790-q5d2n\" (UID: \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.409784 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e471541-c937-41e6-9dd8-a32b167f8adf-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g9gg8\" (UID: \"2e471541-c937-41e6-9dd8-a32b167f8adf\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.409822 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-csi-data-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.409853 4938 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-5kmkc\" (UniqueName: \"kubernetes.io/projected/825e96a5-8bdd-425f-9d92-461eded30ae3-kube-api-access-5kmkc\") pod \"dns-default-dd667\" (UID: \"825e96a5-8bdd-425f-9d92-461eded30ae3\") " pod="openshift-dns/dns-default-dd667" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.409892 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5wt7\" (UniqueName: \"kubernetes.io/projected/648f33c1-89ac-4734-af21-403c7270db09-kube-api-access-k5wt7\") pod \"ingress-canary-8rr8d\" (UID: \"648f33c1-89ac-4734-af21-403c7270db09\") " pod="openshift-ingress-canary/ingress-canary-8rr8d" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.410201 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1c0c610d-d8dd-4aa7-a313-39685144ce31-auth-proxy-config\") pod \"machine-config-operator-74547568cd-97pbg\" (UID: \"1c0c610d-d8dd-4aa7-a313-39685144ce31\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.410677 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/311f1063-1ead-4575-adce-cbf298b713b0-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-jg5zd\" (UID: \"311f1063-1ead-4575-adce-cbf298b713b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.411109 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c89f2e6-7048-4496-9cb9-07320fb586c6-config\") pod \"service-ca-operator-777779d784-wsz8t\" (UID: \"2c89f2e6-7048-4496-9cb9-07320fb586c6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.411820 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-socket-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412302 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/fd877f2f-683c-4734-bd35-df9891a505df-signing-cabundle\") pod \"service-ca-9c57cc56f-vst8c\" (UID: \"fd877f2f-683c-4734-bd35-df9891a505df\") " pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412608 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1c0c610d-d8dd-4aa7-a313-39685144ce31-images\") pod \"machine-config-operator-74547568cd-97pbg\" (UID: \"1c0c610d-d8dd-4aa7-a313-39685144ce31\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412679 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7885daef-8e8d-40a2-8530-7353819a1386-node-bootstrap-token\") pod \"machine-config-server-sqh6q\" (UID: \"7885daef-8e8d-40a2-8530-7353819a1386\") " pod="openshift-machine-config-operator/machine-config-server-sqh6q" 
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412722 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfppm\" (UniqueName: \"kubernetes.io/projected/2c89f2e6-7048-4496-9cb9-07320fb586c6-kube-api-access-hfppm\") pod \"service-ca-operator-777779d784-wsz8t\" (UID: \"2c89f2e6-7048-4496-9cb9-07320fb586c6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412744 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-secret-volume\") pod \"collect-profiles-29396790-q5d2n\" (UID: \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412760 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-default-certificate\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412785 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsff5\" (UniqueName: \"kubernetes.io/projected/87fd6986-2f7f-4c25-bb26-0016630d173c-kube-api-access-tsff5\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412821 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3b1138e-f11b-478a-b955-7737ac63dc31-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-hs2zp\" (UID: \"f3b1138e-f11b-478a-b955-7737ac63dc31\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412837 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3b1138e-f11b-478a-b955-7737ac63dc31-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-hs2zp\" (UID: \"f3b1138e-f11b-478a-b955-7737ac63dc31\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412870 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/dcb746ed-5e3a-4c9e-9416-60a033f9035d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-4t6l6\" (UID: \"dcb746ed-5e3a-4c9e-9416-60a033f9035d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412888 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-metrics-certs\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412924 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f2634903-a7f8-4114-b3d3-f902eb3df5ee-srv-cert\") pod \"olm-operator-6b444d44fb-7mhhs\" (UID: \"f2634903-a7f8-4114-b3d3-f902eb3df5ee\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.412941 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2e471541-c937-41e6-9dd8-a32b167f8adf-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g9gg8\" (UID: \"2e471541-c937-41e6-9dd8-a32b167f8adf\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.413061 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg92v\" (UniqueName: \"kubernetes.io/projected/311f1063-1ead-4575-adce-cbf298b713b0-kube-api-access-qg92v\") pod \"marketplace-operator-79b997595-jg5zd\" (UID: \"311f1063-1ead-4575-adce-cbf298b713b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.413091 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc8c7\" (UniqueName: \"kubernetes.io/projected/f3b1138e-f11b-478a-b955-7737ac63dc31-kube-api-access-jc8c7\") pod \"kube-storage-version-migrator-operator-b67b599dd-hs2zp\" (UID: \"f3b1138e-f11b-478a-b955-7737ac63dc31\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.413108 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/311f1063-1ead-4575-adce-cbf298b713b0-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-jg5zd\" (UID: \"311f1063-1ead-4575-adce-cbf298b713b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.413133 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-registration-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.413158 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spwl6\" (UniqueName: \"kubernetes.io/projected/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-kube-api-access-spwl6\") pod \"collect-profiles-29396790-q5d2n\" (UID: \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.413194 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/825e96a5-8bdd-425f-9d92-461eded30ae3-config-volume\") pod \"dns-default-dd667\" (UID: \"825e96a5-8bdd-425f-9d92-461eded30ae3\") " pod="openshift-dns/dns-default-dd667"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.413426 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-mountpoint-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.413496 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c908925-144c-4a75-a9c4-35c2b585db68-config\") pod \"kube-apiserver-operator-766d6c64bb-nghqj\" (UID: \"1c908925-144c-4a75-a9c4-35c2b585db68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.414004 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-service-ca-bundle\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.414105 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-config-volume\") pod \"collect-profiles-29396790-q5d2n\" (UID: \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.414369 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/000e79cf-31f1-47f0-974c-4918f468ca74-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-k7dtf\" (UID: \"000e79cf-31f1-47f0-974c-4918f468ca74\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.414564 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-csi-data-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.414939 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-stats-auth\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.415197 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.416188 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e471541-c937-41e6-9dd8-a32b167f8adf-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g9gg8\" (UID: \"2e471541-c937-41e6-9dd8-a32b167f8adf\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.417442 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/648f33c1-89ac-4734-af21-403c7270db09-cert\") pod \"ingress-canary-8rr8d\" (UID: \"648f33c1-89ac-4734-af21-403c7270db09\") " pod="openshift-ingress-canary/ingress-canary-8rr8d"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.417722 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3b1138e-f11b-478a-b955-7737ac63dc31-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-hs2zp\" (UID: \"f3b1138e-f11b-478a-b955-7737ac63dc31\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.417811 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/87fd6986-2f7f-4c25-bb26-0016630d173c-registration-dir\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.418991 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.419400 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.419935 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/dcb746ed-5e3a-4c9e-9416-60a033f9035d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-4t6l6\" (UID: \"dcb746ed-5e3a-4c9e-9416-60a033f9035d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.420370 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f2634903-a7f8-4114-b3d3-f902eb3df5ee-srv-cert\") pod \"olm-operator-6b444d44fb-7mhhs\" (UID: \"f2634903-a7f8-4114-b3d3-f902eb3df5ee\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.422011 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-default-certificate\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.422612 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/311f1063-1ead-4575-adce-cbf298b713b0-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-jg5zd\" (UID: \"311f1063-1ead-4575-adce-cbf298b713b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.422970 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7885daef-8e8d-40a2-8530-7353819a1386-node-bootstrap-token\") pod \"machine-config-server-sqh6q\" (UID: \"7885daef-8e8d-40a2-8530-7353819a1386\") " pod="openshift-machine-config-operator/machine-config-server-sqh6q"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.423106 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c89f2e6-7048-4496-9cb9-07320fb586c6-serving-cert\") pod \"service-ca-operator-777779d784-wsz8t\" (UID: \"2c89f2e6-7048-4496-9cb9-07320fb586c6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.423380 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-metrics-certs\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.423495 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3b1138e-f11b-478a-b955-7737ac63dc31-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-hs2zp\" (UID: \"f3b1138e-f11b-478a-b955-7737ac63dc31\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.423670 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cb0610d9-7370-438c-a65a-db457f13d8be-profile-collector-cert\") pod \"catalog-operator-68c6474976-ggzxq\" (UID: \"cb0610d9-7370-438c-a65a-db457f13d8be\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.423810 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2e471541-c937-41e6-9dd8-a32b167f8adf-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g9gg8\" (UID: \"2e471541-c937-41e6-9dd8-a32b167f8adf\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.423979 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d0d89759-92ad-4ff1-8ab9-7da4338c7148-webhook-cert\") pod \"packageserver-d55dfcdfc-lr8gp\" (UID: \"d0d89759-92ad-4ff1-8ab9-7da4338c7148\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.424028 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca3f486a-1534-437f-8b98-03f1304b4686-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-mh9ds\" (UID: \"ca3f486a-1534-437f-8b98-03f1304b4686\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-mh9ds"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.424063 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d0d89759-92ad-4ff1-8ab9-7da4338c7148-apiservice-cert\") pod \"packageserver-d55dfcdfc-lr8gp\" (UID: \"d0d89759-92ad-4ff1-8ab9-7da4338c7148\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.424093 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1c0c610d-d8dd-4aa7-a313-39685144ce31-proxy-tls\") pod \"machine-config-operator-74547568cd-97pbg\" (UID: \"1c0c610d-d8dd-4aa7-a313-39685144ce31\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.424258 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/000e79cf-31f1-47f0-974c-4918f468ca74-proxy-tls\") pod \"machine-config-controller-84d6567774-k7dtf\" (UID: \"000e79cf-31f1-47f0-974c-4918f468ca74\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.424572 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/825e96a5-8bdd-425f-9d92-461eded30ae3-metrics-tls\") pod \"dns-default-dd667\" (UID: \"825e96a5-8bdd-425f-9d92-461eded30ae3\") " pod="openshift-dns/dns-default-dd667"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.424729 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f2634903-a7f8-4114-b3d3-f902eb3df5ee-profile-collector-cert\") pod \"olm-operator-6b444d44fb-7mhhs\" (UID: \"f2634903-a7f8-4114-b3d3-f902eb3df5ee\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.424875 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cb0610d9-7370-438c-a65a-db457f13d8be-srv-cert\") pod \"catalog-operator-68c6474976-ggzxq\" (UID: \"cb0610d9-7370-438c-a65a-db457f13d8be\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.425371 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/fd877f2f-683c-4734-bd35-df9891a505df-signing-key\") pod \"service-ca-9c57cc56f-vst8c\" (UID: \"fd877f2f-683c-4734-bd35-df9891a505df\") " pod="openshift-service-ca/service-ca-9c57cc56f-vst8c"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.425657 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7885daef-8e8d-40a2-8530-7353819a1386-certs\") pod \"machine-config-server-sqh6q\" (UID: \"7885daef-8e8d-40a2-8530-7353819a1386\") " pod="openshift-machine-config-operator/machine-config-server-sqh6q"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.427326 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-secret-volume\") pod \"collect-profiles-29396790-q5d2n\" (UID: \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.430285 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c908925-144c-4a75-a9c4-35c2b585db68-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-nghqj\" (UID: \"1c908925-144c-4a75-a9c4-35c2b585db68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.430558 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.449717 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.450020 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-rl6xd"]
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.465668 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-encryption-config\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.465899 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w"]
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.468970 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.469981 4938 projected.go:194] Error preparing data for projected volume kube-api-access-tzsnd for pod openshift-apiserver/apiserver-76f77b778f-fk4l7: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.470044 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9a34102a-5b15-4d64-9ca6-d565af874df5-kube-api-access-tzsnd podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:23.970024947 +0000 UTC m=+156.437862346 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-tzsnd" (UniqueName: "kubernetes.io/projected/9a34102a-5b15-4d64-9ca6-d565af874df5-kube-api-access-tzsnd") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487583 4938 configmap.go:193] Couldn't get configMap openshift-apiserver/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487655 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-trusted-ca-bundle podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.48763758 +0000 UTC m=+156.955474979 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-trusted-ca-bundle") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487742 4938 configmap.go:193] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487788 4938 configmap.go:193] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487805 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/64767c08-8bde-4744-b0dd-e1629fd6e349-config podName:64767c08-8bde-4744-b0dd-e1629fd6e349 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.487788474 +0000 UTC m=+156.955625873 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/64767c08-8bde-4744-b0dd-e1629fd6e349-config") pod "openshift-apiserver-operator-796bbdcf4f-qskvn" (UID: "64767c08-8bde-4744-b0dd-e1629fd6e349") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487742 4938 secret.go:188] Couldn't get secret openshift-cluster-machine-approver/machine-approver-tls: failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487824 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-config podName:44da01b3-b33a-402b-9bc1-ceea816d801b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.487814805 +0000 UTC m=+156.955652204 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-config") pod "machine-api-operator-5694c8668f-rqzh7" (UID: "44da01b3-b33a-402b-9bc1-ceea816d801b") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487841 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-machine-approver-tls podName:2c5a6974-f26d-422a-9fb9-dcc8968fb1f2 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.487831615 +0000 UTC m=+156.955669114 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "machine-approver-tls" (UniqueName: "kubernetes.io/secret/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-machine-approver-tls") pod "machine-approver-56656f9798-lhxtg" (UID: "2c5a6974-f26d-422a-9fb9-dcc8968fb1f2") : failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487843 4938 configmap.go:193] Couldn't get configMap openshift-apiserver/etcd-serving-ca: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487857 4938 configmap.go:193] Couldn't get configMap openshift-apiserver/audit-1: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487898 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-serving-ca podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.487864246 +0000 UTC m=+156.955701645 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-serving-ca" (UniqueName: "kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-serving-ca") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487937 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-audit podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.487904877 +0000 UTC m=+156.955742276 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "audit" (UniqueName: "kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-audit") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487958 4938 secret.go:188] Couldn't get secret openshift-apiserver/serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487951 4938 secret.go:188] Couldn't get secret openshift-oauth-apiserver/serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.487979 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-serving-cert podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.487973569 +0000 UTC m=+156.955810968 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-serving-cert") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488020 4938 secret.go:188] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488041 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64767c08-8bde-4744-b0dd-e1629fd6e349-serving-cert podName:64767c08-8bde-4744-b0dd-e1629fd6e349 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.48803638 +0000 UTC m=+156.955873769 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/64767c08-8bde-4744-b0dd-e1629fd6e349-serving-cert") pod "openshift-apiserver-operator-796bbdcf4f-qskvn" (UID: "64767c08-8bde-4744-b0dd-e1629fd6e349") : failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488065 4938 configmap.go:193] Couldn't get configMap openshift-controller-manager/client-ca: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488089 4938 configmap.go:193] Couldn't get configMap openshift-machine-api/machine-api-operator-images: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488101 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-client-ca podName:d90bb3f2-72ce-41fa-b865-8892a4b70c06 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.488094312 +0000 UTC m=+156.955931701 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-client-ca") pod "controller-manager-879f6c89f-88scl" (UID: "d90bb3f2-72ce-41fa-b865-8892a4b70c06") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488116 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-images podName:44da01b3-b33a-402b-9bc1-ceea816d801b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.488107672 +0000 UTC m=+156.955945061 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-images") pod "machine-api-operator-5694c8668f-rqzh7" (UID: "44da01b3-b33a-402b-9bc1-ceea816d801b") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488117 4938 secret.go:188] Couldn't get secret openshift-machine-api/machine-api-operator-tls: failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488139 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/44da01b3-b33a-402b-9bc1-ceea816d801b-machine-api-operator-tls podName:44da01b3-b33a-402b-9bc1-ceea816d801b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.488134373 +0000 UTC m=+156.955971772 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/44da01b3-b33a-402b-9bc1-ceea816d801b-machine-api-operator-tls") pod "machine-api-operator-5694c8668f-rqzh7" (UID: "44da01b3-b33a-402b-9bc1-ceea816d801b") : failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488182 4938 configmap.go:193] Couldn't get configMap openshift-authentication-operator/service-ca-bundle: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488210 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-service-ca-bundle podName:a0ba3027-3c7b-479f-be9f-ac471151ec8a nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.488202495 +0000 UTC m=+156.956039974 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-service-ca-bundle") pod "authentication-operator-69f744f599-xb74c" (UID: "a0ba3027-3c7b-479f-be9f-ac471151ec8a") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488266 4938 configmap.go:193] Couldn't get configMap openshift-authentication-operator/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488281 4938 configmap.go:193] Couldn't get configMap openshift-apiserver/image-import-ca: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488295 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-trusted-ca-bundle podName:a0ba3027-3c7b-479f-be9f-ac471151ec8a nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.488286187 +0000 UTC m=+156.956123656 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-trusted-ca-bundle") pod "authentication-operator-69f744f599-xb74c" (UID: "a0ba3027-3c7b-479f-be9f-ac471151ec8a") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488268 4938 configmap.go:193] Couldn't get configMap openshift-oauth-apiserver/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488307 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-image-import-ca podName:9a34102a-5b15-4d64-9ca6-d565af874df5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.488300747 +0000 UTC m=+156.956138146 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "image-import-ca" (UniqueName: "kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-image-import-ca") pod "apiserver-76f77b778f-fk4l7" (UID: "9a34102a-5b15-4d64-9ca6-d565af874df5") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488322 4938 secret.go:188] Couldn't get secret openshift-authentication-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488340 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-trusted-ca-bundle podName:2de462e6-eb84-4e7e-904e-5d303e8ffc17 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.488315008 +0000 UTC m=+156.956152407 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-trusted-ca-bundle") pod "apiserver-7bbb656c7d-999ph" (UID: "2de462e6-eb84-4e7e-904e-5d303e8ffc17") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488353 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0ba3027-3c7b-479f-be9f-ac471151ec8a-serving-cert podName:a0ba3027-3c7b-479f-be9f-ac471151ec8a nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.488347899 +0000 UTC m=+156.956185398 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/a0ba3027-3c7b-479f-be9f-ac471151ec8a-serving-cert") pod "authentication-operator-69f744f599-xb74c" (UID: "a0ba3027-3c7b-479f-be9f-ac471151ec8a") : failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488355 4938 configmap.go:193] Couldn't get configMap openshift-oauth-apiserver/audit-1: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488372 4938 secret.go:188] Couldn't get secret openshift-oauth-apiserver/etcd-client: failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488375 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-audit-policies podName:2de462e6-eb84-4e7e-904e-5d303e8ffc17 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.488369099 +0000 UTC m=+156.956206488 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-audit-policies") pod "apiserver-7bbb656c7d-999ph" (UID: "2de462e6-eb84-4e7e-904e-5d303e8ffc17") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488389 4938 configmap.go:193] Couldn't get configMap openshift-authentication-operator/authentication-operator-config: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488391 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-client podName:2de462e6-eb84-4e7e-904e-5d303e8ffc17 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.4883863 +0000 UTC m=+156.956223699 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-client") pod "apiserver-7bbb656c7d-999ph" (UID: "2de462e6-eb84-4e7e-904e-5d303e8ffc17") : failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488414 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-config podName:a0ba3027-3c7b-479f-be9f-ac471151ec8a nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.48840835 +0000 UTC m=+156.956245749 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-config") pod "authentication-operator-69f744f599-xb74c" (UID: "a0ba3027-3c7b-479f-be9f-ac471151ec8a") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.488576 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-serving-cert podName:2de462e6-eb84-4e7e-904e-5d303e8ffc17 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.488563624 +0000 UTC m=+156.956401023 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-serving-cert") pod "apiserver-7bbb656c7d-999ph" (UID: "2de462e6-eb84-4e7e-904e-5d303e8ffc17") : failed to sync secret cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.489416 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.492587 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-k9fxc"]
Nov 22 10:40:23 crc kubenswrapper[4938]: W1122 10:40:23.499472 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5364235f_88dd_4a0b_a055_5c075fbdff13.slice/crio-6d3c8620875bb0b9e54d892842610bf7479823a03283061c93afe99772a676b1 WatchSource:0}: Error finding container 6d3c8620875bb0b9e54d892842610bf7479823a03283061c93afe99772a676b1: Status 404 returned error can't find the container with id 6d3c8620875bb0b9e54d892842610bf7479823a03283061c93afe99772a676b1
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.509818 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.515082 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf"
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.515445 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.0154301 +0000 UTC m=+156.483267499 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.529358 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.549442 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.569586 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.589334 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.609938 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.612246 4938 projected.go:194] Error preparing data for projected volume kube-api-access-pnxfq for pod openshift-machine-api/machine-api-operator-5694c8668f-rqzh7: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.612309 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/44da01b3-b33a-402b-9bc1-ceea816d801b-kube-api-access-pnxfq podName:44da01b3-b33a-402b-9bc1-ceea816d801b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.112290545 +0000 UTC m=+156.580127944 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-pnxfq" (UniqueName: "kubernetes.io/projected/44da01b3-b33a-402b-9bc1-ceea816d801b-kube-api-access-pnxfq") pod "machine-api-operator-5694c8668f-rqzh7" (UID: "44da01b3-b33a-402b-9bc1-ceea816d801b") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.616203 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.616349 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.116338042 +0000 UTC m=+156.584175441 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.616619 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf"
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.616959 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.116952468 +0000 UTC m=+156.584789867 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.629975 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.650178 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.670510 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.690579 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.709793 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.717831 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.718057 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.218031484 +0000 UTC m=+156.685868883 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.718295 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwvk7\" (UniqueName: \"kubernetes.io/projected/64767c08-8bde-4744-b0dd-e1629fd6e349-kube-api-access-bwvk7\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.718401 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf"
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.718681 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.218666111 +0000 UTC m=+156.686503510 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.723671 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwvk7\" (UniqueName: \"kubernetes.io/projected/64767c08-8bde-4744-b0dd-e1629fd6e349-kube-api-access-bwvk7\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.729591 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.731572 4938 projected.go:194] Error preparing data for projected volume kube-api-access-pdwwt for pod openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.731709 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2de462e6-eb84-4e7e-904e-5d303e8ffc17-kube-api-access-pdwwt podName:2de462e6-eb84-4e7e-904e-5d303e8ffc17 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.231694263 +0000 UTC m=+156.699531662 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-pdwwt" (UniqueName: "kubernetes.io/projected/2de462e6-eb84-4e7e-904e-5d303e8ffc17-kube-api-access-pdwwt") pod "apiserver-7bbb656c7d-999ph" (UID: "2de462e6-eb84-4e7e-904e-5d303e8ffc17") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.749611 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.770962 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.789992 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.809710 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.820150 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.820389 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.320357903 +0000 UTC m=+156.788195302 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.820721 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8t6p\" (UniqueName: \"kubernetes.io/projected/d90bb3f2-72ce-41fa-b865-8892a4b70c06-kube-api-access-h8t6p\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.821211 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf"
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.821529 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.321519253 +0000 UTC m=+156.789356652 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.824221 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8t6p\" (UniqueName: \"kubernetes.io/projected/d90bb3f2-72ce-41fa-b865-8892a4b70c06-kube-api-access-h8t6p\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.829771 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.850288 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.869774 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.890236 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.916402 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.921635 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.921824 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.421800549 +0000 UTC m=+156.889637968 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.922061 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.922463 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.422448586 +0000 UTC m=+156.890286055 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.929511 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.949876 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.960167 4938 projected.go:194] Error preparing data for projected volume kube-api-access-lxgc8 for pod openshift-authentication-operator/authentication-operator-69f744f599-xb74c: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: E1122 10:40:23.960272 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a0ba3027-3c7b-479f-be9f-ac471151ec8a-kube-api-access-lxgc8 podName:a0ba3027-3c7b-479f-be9f-ac471151ec8a nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.460250519 +0000 UTC m=+156.928087958 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-lxgc8" (UniqueName: "kubernetes.io/projected/a0ba3027-3c7b-479f-be9f-ac471151ec8a-kube-api-access-lxgc8") pod "authentication-operator-69f744f599-xb74c" (UID: "a0ba3027-3c7b-479f-be9f-ac471151ec8a") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.970178 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.988614 4938 request.go:700] Waited for 1.701206643s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/configmaps?fieldSelector=metadata.name%3Dtrusted-ca-bundle&limit=500&resourceVersion=0 Nov 22 10:40:23 crc kubenswrapper[4938]: I1122 10:40:23.996030 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.009388 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.022855 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.023036 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.523010978 +0000 UTC m=+156.990848377 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.023237 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.023540 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzsnd\" (UniqueName: \"kubernetes.io/projected/9a34102a-5b15-4d64-9ca6-d565af874df5-kube-api-access-tzsnd\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.023617 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.523602024 +0000 UTC m=+156.991439433 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.028047 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzsnd\" (UniqueName: \"kubernetes.io/projected/9a34102a-5b15-4d64-9ca6-d565af874df5-kube-api-access-tzsnd\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.029385 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.049605 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.069727 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.090631 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.110508 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.120549 4938 projected.go:194] Error preparing data 
for projected volume kube-api-access-d7vfb for pod openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.120613 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-kube-api-access-d7vfb podName:2c5a6974-f26d-422a-9fb9-dcc8968fb1f2 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.620595932 +0000 UTC m=+157.088433331 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-d7vfb" (UniqueName: "kubernetes.io/projected/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-kube-api-access-d7vfb") pod "machine-approver-56656f9798-lhxtg" (UID: "2c5a6974-f26d-422a-9fb9-dcc8968fb1f2") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.124872 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.125218 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnxfq\" (UniqueName: \"kubernetes.io/projected/44da01b3-b33a-402b-9bc1-ceea816d801b-kube-api-access-pnxfq\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.126074 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.626061286 +0000 UTC m=+157.093898685 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.128944 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnxfq\" (UniqueName: \"kubernetes.io/projected/44da01b3-b33a-402b-9bc1-ceea816d801b-kube-api-access-pnxfq\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.129785 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.157808 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.170408 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.226021 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kw9b\" (UniqueName: \"kubernetes.io/projected/d1b3eec7-79a6-4f38-b8a2-e2d1741c1479-kube-api-access-7kw9b\") pod \"cluster-image-registry-operator-dc59b4c8b-5m6z6\" (UID: \"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.226805 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.227805 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.727781229 +0000 UTC m=+157.195618628 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.237581 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-k5zr9" event={"ID":"9a78982d-f026-44c1-a2d2-ec9caa99331c","Type":"ContainerStarted","Data":"295371d8f00675995a5af71b0f020386c3e30faa48f12ed077a40c5d2dfca6aa"} Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.237654 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-k5zr9" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.237669 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-k5zr9" event={"ID":"9a78982d-f026-44c1-a2d2-ec9caa99331c","Type":"ContainerStarted","Data":"63fc0f511b3fb18446c08a7b333ec91d3027e2beffc75e885708afb491f53d80"} Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.240831 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.240887 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.242488 4938 generic.go:334] "Generic (PLEG): container finished" podID="c2334157-de9a-47fc-8dd1-9388ba35334a" containerID="5f640873c73f7a113d6e0c36dc398e810d037ccebc6eb9ec3a6f97d804289827" exitCode=0 Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.242571 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" event={"ID":"c2334157-de9a-47fc-8dd1-9388ba35334a","Type":"ContainerDied","Data":"5f640873c73f7a113d6e0c36dc398e810d037ccebc6eb9ec3a6f97d804289827"} Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.242634 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" event={"ID":"c2334157-de9a-47fc-8dd1-9388ba35334a","Type":"ContainerStarted","Data":"2fc585681731c9ef97e025fe552710f4d671fccc98c78afc8d109cabb355f564"} Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.246932 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w" event={"ID":"62a22e0b-f684-4d68-90d4-667e660287cb","Type":"ContainerStarted","Data":"51e50510b478621913ba6f21b7a13dd8fc9dd01a1910d618134ae9a4315af7f0"} Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.246981 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w" 
event={"ID":"62a22e0b-f684-4d68-90d4-667e660287cb","Type":"ContainerStarted","Data":"e4dcf7410df6ff467fdecedc2225d43549215ad0a5816d659dda6df82d36373e"} Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.246998 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w" event={"ID":"62a22e0b-f684-4d68-90d4-667e660287cb","Type":"ContainerStarted","Data":"251556edb05e6ac28c8d4c75c479563f3430c9e6a424067ca94c63834fbe5d36"} Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.249765 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-rl6xd" event={"ID":"4461eec4-354b-417f-b8ae-24e3deed3a5a","Type":"ContainerStarted","Data":"1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb"} Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.249808 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-rl6xd" event={"ID":"4461eec4-354b-417f-b8ae-24e3deed3a5a","Type":"ContainerStarted","Data":"051d817eff1df65881caefa8936d42e3b180e592073f94727b816037a5dc3512"} Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.251518 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-k9fxc" event={"ID":"5364235f-88dd-4a0b-a055-5c075fbdff13","Type":"ContainerStarted","Data":"f045855bdcae8f923565d8d9104c6fe060d468c6186c3c9b50defe5f341dec5c"} Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.252647 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-k9fxc" event={"ID":"5364235f-88dd-4a0b-a055-5c075fbdff13","Type":"ContainerStarted","Data":"6d3c8620875bb0b9e54d892842610bf7479823a03283061c93afe99772a676b1"} Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.252684 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.254077 4938 patch_prober.go:28] interesting pod/console-operator-58897d9998-k9fxc container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/readyz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.254155 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-k9fxc" podUID="5364235f-88dd-4a0b-a055-5c075fbdff13" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/readyz\": dial tcp 10.217.0.18:8443: connect: connection refused" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.256353 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6q8c\" (UniqueName: \"kubernetes.io/projected/d9b35c60-eb15-4473-9098-b44308dd3926-kube-api-access-d6q8c\") pod \"oauth-openshift-558db77b4-wm7ff\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.267532 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/24077b88-8b12-4015-9a87-00af9c32212c-bound-sa-token\") pod \"ingress-operator-5b745b69d9-z795b\" (UID: \"24077b88-8b12-4015-9a87-00af9c32212c\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.269264 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.285890 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ed60c2c-4a2d-4e79-a066-e76586fe7add-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-l4rgm\" (UID: \"3ed60c2c-4a2d-4e79-a066-e76586fe7add\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.309656 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-bound-sa-token\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.327563 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jdpz\" (UniqueName: \"kubernetes.io/projected/24077b88-8b12-4015-9a87-00af9c32212c-kube-api-access-8jdpz\") pod \"ingress-operator-5b745b69d9-z795b\" (UID: \"24077b88-8b12-4015-9a87-00af9c32212c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.328322 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.328489 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.828466105 +0000 UTC m=+157.296303514 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.329102 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdwwt\" (UniqueName: \"kubernetes.io/projected/2de462e6-eb84-4e7e-904e-5d303e8ffc17-kube-api-access-pdwwt\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.329301 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.330587 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.83056828 +0000 UTC m=+157.298405679 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.337093 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdwwt\" (UniqueName: \"kubernetes.io/projected/2de462e6-eb84-4e7e-904e-5d303e8ffc17-kube-api-access-pdwwt\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.345975 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d1b3eec7-79a6-4f38-b8a2-e2d1741c1479-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5m6z6\" (UID: \"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.360545 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.366123 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bdb9\" (UniqueName: \"kubernetes.io/projected/e7a859c1-d7af-4cea-aad2-c2ebb92e9d16-kube-api-access-4bdb9\") pod \"dns-operator-744455d44c-29bgm\" (UID: \"e7a859c1-d7af-4cea-aad2-c2ebb92e9d16\") " pod="openshift-dns-operator/dns-operator-744455d44c-29bgm" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.368413 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.389070 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rt9k4\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-kube-api-access-rt9k4\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.408016 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xczg7\" (UniqueName: \"kubernetes.io/projected/95e28667-9965-4f22-919f-38b1904bd4b2-kube-api-access-xczg7\") pod \"etcd-operator-b45778765-6whgh\" (UID: \"95e28667-9965-4f22-919f-38b1904bd4b2\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.425652 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d56pm\" (UniqueName: \"kubernetes.io/projected/f214e86f-2781-4e64-bef7-118417786b14-kube-api-access-d56pm\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjq2z\" (UID: \"f214e86f-2781-4e64-bef7-118417786b14\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.431924 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-29bgm" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.432125 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.432585 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:24.93256422 +0000 UTC m=+157.400401629 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.439201 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.443510 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.452716 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqf98\" (UniqueName: \"kubernetes.io/projected/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-kube-api-access-gqf98\") pod \"route-controller-manager-6576b87f9c-gw96j\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.469176 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdtlw\" (UniqueName: \"kubernetes.io/projected/fa995a8f-e92f-45fa-8926-73cea902f283-kube-api-access-gdtlw\") pod \"control-plane-machine-set-operator-78cbb6b69f-6xczv\" (UID: \"fa995a8f-e92f-45fa-8926-73cea902f283\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.487727 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xh4k4\" (UniqueName: \"kubernetes.io/projected/f4d7dc9c-3f3a-4114-ae32-b7963c7b4908-kube-api-access-xh4k4\") pod \"migrator-59844c95c7-p4glz\" (UID: \"f4d7dc9c-3f3a-4114-ae32-b7963c7b4908\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p4glz" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.497257 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.506610 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtzgm\" (UniqueName: \"kubernetes.io/projected/7885daef-8e8d-40a2-8530-7353819a1386-kube-api-access-jtzgm\") pod \"machine-config-server-sqh6q\" (UID: \"7885daef-8e8d-40a2-8530-7353819a1386\") " pod="openshift-machine-config-operator/machine-config-server-sqh6q" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.525247 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c94zz\" (UniqueName: \"kubernetes.io/projected/dcb746ed-5e3a-4c9e-9416-60a033f9035d-kube-api-access-c94zz\") pod \"package-server-manager-789f6589d5-4t6l6\" (UID: \"dcb746ed-5e3a-4c9e-9416-60a033f9035d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.530348 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-wm7ff"] Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.539393 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.539464 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.539509 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-image-import-ca\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.539596 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-audit-policies\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.539632 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0ba3027-3c7b-479f-be9f-ac471151ec8a-serving-cert\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.539692 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-client\") pod \"apiserver-7bbb656c7d-999ph\" 
(UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.539725 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-trusted-ca-bundle\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.539768 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-serving-ca\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.539797 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64767c08-8bde-4744-b0dd-e1629fd6e349-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.539851 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-client-ca\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.539907 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-audit\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.539980 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-serving-cert\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.540008 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/64767c08-8bde-4744-b0dd-e1629fd6e349-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.540068 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-images\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.540099 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-service-ca-bundle\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.540130 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/44da01b3-b33a-402b-9bc1-ceea816d801b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.540162 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-config\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.540186 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.540240 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-machine-approver-tls\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.540284 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxgc8\" (UniqueName: \"kubernetes.io/projected/a0ba3027-3c7b-479f-be9f-ac471151ec8a-kube-api-access-lxgc8\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.540327 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-config\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.540356 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-serving-cert\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.543659 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-xb74c\" (UID: 
\"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.543891 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-serving-cert\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.544049 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.044029029 +0000 UTC m=+157.511866438 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.545399 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-image-import-ca\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.547945 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0ba3027-3c7b-479f-be9f-ac471151ec8a-serving-cert\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.550973 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64767c08-8bde-4744-b0dd-e1629fd6e349-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.552301 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-client-ca\") pod \"controller-manager-879f6c89f-88scl\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.553004 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2de462e6-eb84-4e7e-904e-5d303e8ffc17-etcd-client\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.553128 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: 
\"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-audit\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.553656 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-service-ca-bundle\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.553772 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.554184 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2de462e6-eb84-4e7e-904e-5d303e8ffc17-audit-policies\") pod \"apiserver-7bbb656c7d-999ph\" (UID: \"2de462e6-eb84-4e7e-904e-5d303e8ffc17\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.554882 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-images\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.555478 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44da01b3-b33a-402b-9bc1-ceea816d801b-config\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.557013 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-etcd-serving-ca\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.558304 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/64767c08-8bde-4744-b0dd-e1629fd6e349-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qskvn\" (UID: \"64767c08-8bde-4744-b0dd-e1629fd6e349\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.559238 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9a34102a-5b15-4d64-9ca6-d565af874df5-trusted-ca-bundle\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.559680 4938 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9a34102a-5b15-4d64-9ca6-d565af874df5-serving-cert\") pod \"apiserver-76f77b778f-fk4l7\" (UID: \"9a34102a-5b15-4d64-9ca6-d565af874df5\") " pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.566430 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-machine-approver-tls\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.566991 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxgc8\" (UniqueName: \"kubernetes.io/projected/a0ba3027-3c7b-479f-be9f-ac471151ec8a-kube-api-access-lxgc8\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.567499 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p4glz" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.568088 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0ba3027-3c7b-479f-be9f-ac471151ec8a-config\") pod \"authentication-operator-69f744f599-xb74c\" (UID: \"a0ba3027-3c7b-479f-be9f-ac471151ec8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.569764 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tzmb\" (UniqueName: \"kubernetes.io/projected/000e79cf-31f1-47f0-974c-4918f468ca74-kube-api-access-6tzmb\") pod \"machine-config-controller-84d6567774-k7dtf\" (UID: \"000e79cf-31f1-47f0-974c-4918f468ca74\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.571474 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-sqh6q" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.574527 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/44da01b3-b33a-402b-9bc1-ceea816d801b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rqzh7\" (UID: \"44da01b3-b33a-402b-9bc1-ceea816d801b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.575812 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vctm7\" (UniqueName: \"kubernetes.io/projected/fd877f2f-683c-4734-bd35-df9891a505df-kube-api-access-vctm7\") pod \"service-ca-9c57cc56f-vst8c\" (UID: \"fd877f2f-683c-4734-bd35-df9891a505df\") " pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.608682 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbxjs\" (UniqueName: \"kubernetes.io/projected/1c0c610d-d8dd-4aa7-a313-39685144ce31-kube-api-access-bbxjs\") pod \"machine-config-operator-74547568cd-97pbg\" (UID: \"1c0c610d-d8dd-4aa7-a313-39685144ce31\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.609466 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhz6d\" (UniqueName: \"kubernetes.io/projected/6e794058-99a7-45d0-ba53-8a6d3b1c7d1b-kube-api-access-rhz6d\") pod \"router-default-5444994796-mgpzv\" (UID: \"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b\") " pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.612776 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-z795b"] Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.628657 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spwl6\" (UniqueName: \"kubernetes.io/projected/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-kube-api-access-spwl6\") pod \"collect-profiles-29396790-q5d2n\" (UID: \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.638757 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.642701 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.642973 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7vfb\" (UniqueName: \"kubernetes.io/projected/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-kube-api-access-d7vfb\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.646097 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.14606929 +0000 UTC m=+157.613906729 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.648863 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7vfb\" (UniqueName: \"kubernetes.io/projected/2c5a6974-f26d-422a-9fb9-dcc8968fb1f2-kube-api-access-d7vfb\") pod \"machine-approver-56656f9798-lhxtg\" (UID: \"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:24 crc kubenswrapper[4938]: W1122 10:40:24.650886 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24077b88_8b12_4015_9a87_00af9c32212c.slice/crio-b6e27d90f30e75814eef453fa0dd75e659da76966536e3fa084e228b53f8a3ad WatchSource:0}: Error finding container b6e27d90f30e75814eef453fa0dd75e659da76966536e3fa084e228b53f8a3ad: Status 404 returned error can't find the container with id b6e27d90f30e75814eef453fa0dd75e659da76966536e3fa084e228b53f8a3ad Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.663179 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4b2cs\" (UniqueName: \"kubernetes.io/projected/ca3f486a-1534-437f-8b98-03f1304b4686-kube-api-access-4b2cs\") pod \"multus-admission-controller-857f4d67dd-mh9ds\" (UID: \"ca3f486a-1534-437f-8b98-03f1304b4686\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-mh9ds" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.671481 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-299rd\" (UniqueName: \"kubernetes.io/projected/f2634903-a7f8-4114-b3d3-f902eb3df5ee-kube-api-access-299rd\") pod \"olm-operator-6b444d44fb-7mhhs\" (UID: \"f2634903-a7f8-4114-b3d3-f902eb3df5ee\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.688715 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kmkc\" (UniqueName: \"kubernetes.io/projected/825e96a5-8bdd-425f-9d92-461eded30ae3-kube-api-access-5kmkc\") pod \"dns-default-dd667\" (UID: \"825e96a5-8bdd-425f-9d92-461eded30ae3\") " pod="openshift-dns/dns-default-dd667" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.693806 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.714209 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5wt7\" (UniqueName: \"kubernetes.io/projected/648f33c1-89ac-4734-af21-403c7270db09-kube-api-access-k5wt7\") pod \"ingress-canary-8rr8d\" (UID: \"648f33c1-89ac-4734-af21-403c7270db09\") " pod="openshift-ingress-canary/ingress-canary-8rr8d" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.726110 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.730637 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1c908925-144c-4a75-a9c4-35c2b585db68-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-nghqj\" (UID: \"1c908925-144c-4a75-a9c4-35c2b585db68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.736595 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.745819 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.748085 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.248029609 +0000 UTC m=+157.715867008 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.754159 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgfv4\" (UniqueName: \"kubernetes.io/projected/cb0610d9-7370-438c-a65a-db457f13d8be-kube-api-access-mgfv4\") pod \"catalog-operator-68c6474976-ggzxq\" (UID: \"cb0610d9-7370-438c-a65a-db457f13d8be\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.761244 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.761394 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.773274 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.773346 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.780670 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.781010 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qg92v\" (UniqueName: \"kubernetes.io/projected/311f1063-1ead-4575-adce-cbf298b713b0-kube-api-access-qg92v\") pod \"marketplace-operator-79b997595-jg5zd\" (UID: \"311f1063-1ead-4575-adce-cbf298b713b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.782202 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.792788 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.801694 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfppm\" (UniqueName: \"kubernetes.io/projected/2c89f2e6-7048-4496-9cb9-07320fb586c6-kube-api-access-hfppm\") pod \"service-ca-operator-777779d784-wsz8t\" (UID: \"2c89f2e6-7048-4496-9cb9-07320fb586c6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.803299 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.813362 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.815470 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skhvg\" (UniqueName: \"kubernetes.io/projected/d0d89759-92ad-4ff1-8ab9-7da4338c7148-kube-api-access-skhvg\") pod \"packageserver-d55dfcdfc-lr8gp\" (UID: \"d0d89759-92ad-4ff1-8ab9-7da4338c7148\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.820326 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.821384 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.835163 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.843202 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-mh9ds" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.846553 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.851491 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.851532 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.351496067 +0000 UTC m=+157.819333466 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.851600 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.851668 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t" Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.852088 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.352075773 +0000 UTC m=+157.819913172 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.855392 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsff5\" (UniqueName: \"kubernetes.io/projected/87fd6986-2f7f-4c25-bb26-0016630d173c-kube-api-access-tsff5\") pod \"csi-hostpathplugin-rgbsk\" (UID: \"87fd6986-2f7f-4c25-bb26-0016630d173c\") " pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.857897 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.870293 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jc8c7\" (UniqueName: \"kubernetes.io/projected/f3b1138e-f11b-478a-b955-7737ac63dc31-kube-api-access-jc8c7\") pod \"kube-storage-version-migrator-operator-b67b599dd-hs2zp\" (UID: \"f3b1138e-f11b-478a-b955-7737ac63dc31\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.873768 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2e471541-c937-41e6-9dd8-a32b167f8adf-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g9gg8\" (UID: \"2e471541-c937-41e6-9dd8-a32b167f8adf\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.878554 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-dd667" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.894979 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.906095 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.911572 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm"] Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.943121 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.948713 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-8rr8d" Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.953794 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.953947 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.453903088 +0000 UTC m=+157.921740487 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.954268 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:24 crc kubenswrapper[4938]: E1122 10:40:24.954665 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.454654538 +0000 UTC m=+157.922492137 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:24 crc kubenswrapper[4938]: I1122 10:40:24.986889 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-29bgm"] Nov 22 10:40:25 crc kubenswrapper[4938]: W1122 10:40:25.027821 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e794058_99a7_45d0_ba53_8a6d3b1c7d1b.slice/crio-38af9f77be7ed575e75b7750413ecb81929853c12c636cd689a7b87b57ca70a6 WatchSource:0}: Error finding container 38af9f77be7ed575e75b7750413ecb81929853c12c636cd689a7b87b57ca70a6: Status 404 returned error can't find the container with id 38af9f77be7ed575e75b7750413ecb81929853c12c636cd689a7b87b57ca70a6 Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.049640 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8" Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.055786 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:25 crc kubenswrapper[4938]: E1122 10:40:25.055968 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.5559424 +0000 UTC m=+158.023779799 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.056150 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:25 crc kubenswrapper[4938]: E1122 10:40:25.056442 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.556429252 +0000 UTC m=+158.024266652 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.083347 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp" Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.114403 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6"] Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.114926 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z"] Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.116109 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv"] Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.161251 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:25 crc kubenswrapper[4938]: E1122 10:40:25.161397 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.661342669 +0000 UTC m=+158.129180068 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.161800 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:25 crc kubenswrapper[4938]: E1122 10:40:25.162708 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.662697065 +0000 UTC m=+158.130534464 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.242096 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-p4glz"] Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.264653 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-k5zr9" podStartSLOduration=133.264634124 podStartE2EDuration="2m13.264634124s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:25.262377454 +0000 UTC m=+157.730214863" watchObservedRunningTime="2025-11-22 10:40:25.264634124 +0000 UTC m=+157.732471533" Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.264843 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:25 crc kubenswrapper[4938]: E1122 10:40:25.265133 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.765118686 +0000 UTC m=+158.232956085 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.279348 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" event={"ID":"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479","Type":"ContainerStarted","Data":"86e23b87861493043e94ba9a4887d894933d5a3729e08ee70178067f286df3de"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.281645 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" event={"ID":"f214e86f-2781-4e64-bef7-118417786b14","Type":"ContainerStarted","Data":"3f96cc93c8f49a44c71bd3682c20f9d39525125815df31e5c8afe3c4bcc50238"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.284503 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" event={"ID":"c2334157-de9a-47fc-8dd1-9388ba35334a","Type":"ContainerStarted","Data":"75ac02d91a6d0af5dd40a685e23ef1f9ad1c32ac68396f471b22705ab75bab29"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.284781 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.298732 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" event={"ID":"24077b88-8b12-4015-9a87-00af9c32212c","Type":"ContainerStarted","Data":"96001f714456d8d84b2475f4bd6ada6cc8a9299560724de1874fd6f2e2e81335"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.298821 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" event={"ID":"24077b88-8b12-4015-9a87-00af9c32212c","Type":"ContainerStarted","Data":"b6e27d90f30e75814eef453fa0dd75e659da76966536e3fa084e228b53f8a3ad"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.299698 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6whgh"] Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.299907 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-mgpzv" event={"ID":"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b","Type":"ContainerStarted","Data":"38af9f77be7ed575e75b7750413ecb81929853c12c636cd689a7b87b57ca70a6"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.302517 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv" event={"ID":"fa995a8f-e92f-45fa-8926-73cea902f283","Type":"ContainerStarted","Data":"fcde5ba89510d4e06b95b2c1af839ea6be42822e23d9cac4f33b2d0bd79c23b1"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.313486 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-sqh6q" 
event={"ID":"7885daef-8e8d-40a2-8530-7353819a1386","Type":"ContainerStarted","Data":"88c1dc5ee2ae4462e8eced7e0c6d0a6d411ccff0c472f01b99070c7db9ea6356"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.313538 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-sqh6q" event={"ID":"7885daef-8e8d-40a2-8530-7353819a1386","Type":"ContainerStarted","Data":"ea795a2f2278995f5061c9bab6204bbb762205bba9ac349821a433633002023a"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.315503 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" event={"ID":"3ed60c2c-4a2d-4e79-a066-e76586fe7add","Type":"ContainerStarted","Data":"90c3f62756cc9b16c9ff0bc416fca734ec4cecf85074ad582e606454b7761446"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.335168 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" event={"ID":"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2","Type":"ContainerStarted","Data":"54dc64e82a2f74dfd60e150a4764ebae78b11c90adbbaae866851563e667bfba"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.340390 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-29bgm" event={"ID":"e7a859c1-d7af-4cea-aad2-c2ebb92e9d16","Type":"ContainerStarted","Data":"31d015f0889855e5fffbbf9507fe57d48afeffc8d7d20b31db8e180ee491154f"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.355329 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-rl6xd" podStartSLOduration=133.355312706 podStartE2EDuration="2m13.355312706s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:25.30939677 +0000 UTC m=+157.777234179" watchObservedRunningTime="2025-11-22 10:40:25.355312706 +0000 UTC m=+157.823150105" Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.366491 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" event={"ID":"d9b35c60-eb15-4473-9098-b44308dd3926","Type":"ContainerStarted","Data":"61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.366626 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.366640 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" event={"ID":"d9b35c60-eb15-4473-9098-b44308dd3926","Type":"ContainerStarted","Data":"835df655d776a0e01e1a3406db8fdec8db5869184c8e270fec02b15a936ac709"} Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.368183 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:25 crc kubenswrapper[4938]: E1122 10:40:25.368666 4938 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.868643997 +0000 UTC m=+158.336481396 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.368709 4938 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-wm7ff container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" start-of-body= Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.368763 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" podUID="d9b35c60-eb15-4473-9098-b44308dd3926" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.368867 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.368888 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.376549 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-k9fxc" Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.471980 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:25 crc kubenswrapper[4938]: E1122 10:40:25.473495 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:25.973476491 +0000 UTC m=+158.441313890 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.573990 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:25 crc kubenswrapper[4938]: E1122 10:40:25.574683 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:26.07466831 +0000 UTC m=+158.542505709 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.675612 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:25 crc kubenswrapper[4938]: E1122 10:40:25.675885 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:26.17587236 +0000 UTC m=+158.643709759 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.724313 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j"] Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.733112 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-fk4l7"] Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.778739 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:25 crc kubenswrapper[4938]: E1122 10:40:25.779290 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:26.279273537 +0000 UTC m=+158.747110936 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.886529 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:25 crc kubenswrapper[4938]: E1122 10:40:25.887004 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:26.386985577 +0000 UTC m=+158.854822976 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:25 crc kubenswrapper[4938]: W1122 10:40:25.912025 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a34102a_5b15_4d64_9ca6_d565af874df5.slice/crio-a1cc5591b48818bd42b35299348732e5dcb12b81785b20b53da5cd987361a4cf WatchSource:0}: Error finding container a1cc5591b48818bd42b35299348732e5dcb12b81785b20b53da5cd987361a4cf: Status 404 returned error can't find the container with id a1cc5591b48818bd42b35299348732e5dcb12b81785b20b53da5cd987361a4cf Nov 22 10:40:25 crc kubenswrapper[4938]: I1122 10:40:25.987668 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:25 crc kubenswrapper[4938]: E1122 10:40:25.988044 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:26.488029923 +0000 UTC m=+158.955867322 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.089487 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:26 crc kubenswrapper[4938]: E1122 10:40:26.090086 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:26.590062834 +0000 UTC m=+159.057900223 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.090433 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:26 crc kubenswrapper[4938]: E1122 10:40:26.090837 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:26.590827054 +0000 UTC m=+159.058664453 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.092275 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-k9fxc" podStartSLOduration=134.092261231 podStartE2EDuration="2m14.092261231s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:26.085296528 +0000 UTC m=+158.553133927" watchObservedRunningTime="2025-11-22 10:40:26.092261231 +0000 UTC m=+158.560098630" Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.191404 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:26 crc kubenswrapper[4938]: E1122 10:40:26.191719 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:26.691704575 +0000 UTC m=+159.159541974 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.293056 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:26 crc kubenswrapper[4938]: E1122 10:40:26.293981 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:26.793960802 +0000 UTC m=+159.261798211 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.297677 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.303355 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.307037 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-q6v7w" podStartSLOduration=134.307023885 podStartE2EDuration="2m14.307023885s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:26.305959007 +0000 UTC m=+158.773796406" watchObservedRunningTime="2025-11-22 10:40:26.307023885 +0000 UTC m=+158.774861284" Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.343444 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-88scl"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.350677 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.383865 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-mgpzv" event={"ID":"6e794058-99a7-45d0-ba53-8a6d3b1c7d1b","Type":"ContainerStarted","Data":"702da909ff367d9fc760b35c9d33985a67b89ee79ea27c4ffc00c59ff0860961"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.389849 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" event={"ID":"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2","Type":"ContainerStarted","Data":"83978e3ba9690a80da722ca36584a31ec1476c122c7a0b2944297e73c7842418"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.393855 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" event={"ID":"edf2b3e3-6340-4f0f-8688-08f4b7a918b1","Type":"ContainerStarted","Data":"b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.393895 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" event={"ID":"edf2b3e3-6340-4f0f-8688-08f4b7a918b1","Type":"ContainerStarted","Data":"153872a10e12af892b6bf660016b1fe44dcd73ff38d9b2d05f512e667dedba66"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.393924 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.395519 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:26 crc kubenswrapper[4938]: E1122 10:40:26.395685 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:26.895667104 +0000 UTC m=+159.363504503 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.395951 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:26 crc kubenswrapper[4938]: E1122 10:40:26.396233 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:26.896226419 +0000 UTC m=+159.364063818 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.398522 4938 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-gw96j container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body= Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.398552 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" podUID="edf2b3e3-6340-4f0f-8688-08f4b7a918b1" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.403848 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p4glz" event={"ID":"f4d7dc9c-3f3a-4114-ae32-b7963c7b4908","Type":"ContainerStarted","Data":"bd22eb0b2e7995b0820058d6dbcad7755cce5a2a7ea0e60c7e71d00d75add6d7"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.403879 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p4glz" event={"ID":"f4d7dc9c-3f3a-4114-ae32-b7963c7b4908","Type":"ContainerStarted","Data":"8442b8831efafd8fc4ebef400a562048de46f315454b802c7f289202ec49a20e"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.403889 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p4glz" event={"ID":"f4d7dc9c-3f3a-4114-ae32-b7963c7b4908","Type":"ContainerStarted","Data":"5e7bfcb188d3b1315d671cdf698945b1b58b01d748b87f22a3a9e88147cde8bd"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.407701 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" event={"ID":"95e28667-9965-4f22-919f-38b1904bd4b2","Type":"ContainerStarted","Data":"950d31fd13ca72a3d4e64206138878837835548dc20838d9d6f08bb40c38680e"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.407927 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" event={"ID":"95e28667-9965-4f22-919f-38b1904bd4b2","Type":"ContainerStarted","Data":"0fe214d68ecdd4766b5332c655b9982fe89977b537b7cab5c59e0d9e8067f80a"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.412133 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" event={"ID":"2de462e6-eb84-4e7e-904e-5d303e8ffc17","Type":"ContainerStarted","Data":"169337392efb9c52bc6fbfadc848e3f72df3e0f8e16dcbdae5d1f8b37eec9995"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.419292 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" 
event={"ID":"24077b88-8b12-4015-9a87-00af9c32212c","Type":"ContainerStarted","Data":"6b84729386b4cc74badca3f13e5a370aedcbb0f38a7ef1191b8f9904cfc5ed3a"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.424018 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-29bgm" event={"ID":"e7a859c1-d7af-4cea-aad2-c2ebb92e9d16","Type":"ContainerStarted","Data":"796f14de5764941bf856a730afd288a34ab59a8332044e341ce08b450a25d3e4"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.427500 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" event={"ID":"9a34102a-5b15-4d64-9ca6-d565af874df5","Type":"ContainerStarted","Data":"1b18563f776bf92ef6c94f35d2d2fd27f9f134adbffa8bb9a6422911698beb89"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.427540 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" event={"ID":"9a34102a-5b15-4d64-9ca6-d565af874df5","Type":"ContainerStarted","Data":"a1cc5591b48818bd42b35299348732e5dcb12b81785b20b53da5cd987361a4cf"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.431961 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" event={"ID":"d1b3eec7-79a6-4f38-b8a2-e2d1741c1479","Type":"ContainerStarted","Data":"63ea60ef09f87f176fd8e18d19f59e6207f8bdfd9a95604aaff3d16aedfc6a71"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.435858 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" event={"ID":"64767c08-8bde-4744-b0dd-e1629fd6e349","Type":"ContainerStarted","Data":"0b02b93af46bc1c058c8ff36f268b323f3c06f8cb577c7543196e23ad6f63b1b"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.442960 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" event={"ID":"f214e86f-2781-4e64-bef7-118417786b14","Type":"ContainerStarted","Data":"c7bf6c25b8bd33fd15d6005eef1ebd0327e7e8e42488fa3d5cb6c91f13786383"} Nov 22 10:40:26 crc kubenswrapper[4938]: W1122 10:40:26.450068 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c0c610d_d8dd_4aa7_a313_39685144ce31.slice/crio-92e333793410fff4068bafd1400e51cf00168524d9c220c4c7c173a9b18d9407 WatchSource:0}: Error finding container 92e333793410fff4068bafd1400e51cf00168524d9c220c4c7c173a9b18d9407: Status 404 returned error can't find the container with id 92e333793410fff4068bafd1400e51cf00168524d9c220c4c7c173a9b18d9407 Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.468618 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" event={"ID":"3ed60c2c-4a2d-4e79-a066-e76586fe7add","Type":"ContainerStarted","Data":"fd95130ea982ca3c31d499035ac52f9415a68fb8bb1ef1655e02497b1f8c3967"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.500400 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:26 crc kubenswrapper[4938]: E1122 
10:40:26.501613 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:27.001596948 +0000 UTC m=+159.469434347 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.538042 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv" event={"ID":"fa995a8f-e92f-45fa-8926-73cea902f283","Type":"ContainerStarted","Data":"54620d14600d1048afc0768a44e0c4c664de19c63dd917089f386cffd1517945"} Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.562699 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.615104 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:26 crc kubenswrapper[4938]: E1122 10:40:26.618315 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:27.118297834 +0000 UTC m=+159.586135233 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.664955 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5m6z6" podStartSLOduration=134.664898769 podStartE2EDuration="2m14.664898769s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:26.616097026 +0000 UTC m=+159.083934415" watchObservedRunningTime="2025-11-22 10:40:26.664898769 +0000 UTC m=+159.132736168" Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.666395 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.673865 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6xczv" podStartSLOduration=134.673841794 podStartE2EDuration="2m14.673841794s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:26.662419574 +0000 UTC m=+159.130256983" watchObservedRunningTime="2025-11-22 10:40:26.673841794 +0000 UTC m=+159.141679193" Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.718860 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:26 crc kubenswrapper[4938]: E1122 10:40:26.719564 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:27.219548445 +0000 UTC m=+159.687385844 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.719652 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-xb74c"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.725327 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-mgpzv" podStartSLOduration=134.725304286 podStartE2EDuration="2m14.725304286s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:26.701002587 +0000 UTC m=+159.168839986" watchObservedRunningTime="2025-11-22 10:40:26.725304286 +0000 UTC m=+159.193141775" Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.749443 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-sqh6q" podStartSLOduration=6.74942521 podStartE2EDuration="6.74942521s" podCreationTimestamp="2025-11-22 10:40:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:26.721422844 +0000 UTC m=+159.189260233" watchObservedRunningTime="2025-11-22 10:40:26.74942521 +0000 UTC m=+159.217262609" Nov 22 10:40:26 crc kubenswrapper[4938]: W1122 10:40:26.773245 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0ba3027_3c7b_479f_be9f_ac471151ec8a.slice/crio-9dcfb0c9019d8dfa351e2c509b795ffb42a79c3278ea7b9298bae6b951b6adb9 WatchSource:0}: Error finding container 9dcfb0c9019d8dfa351e2c509b795ffb42a79c3278ea7b9298bae6b951b6adb9: Status 404 returned error can't find the container with id 9dcfb0c9019d8dfa351e2c509b795ffb42a79c3278ea7b9298bae6b951b6adb9 Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.774326 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" podStartSLOduration=134.774309444 podStartE2EDuration="2m14.774309444s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:26.762100463 +0000 UTC m=+159.229937862" watchObservedRunningTime="2025-11-22 10:40:26.774309444 +0000 UTC m=+159.242146833" Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.775971 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.784787 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.802594 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure 
output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:40:26 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld Nov 22 10:40:26 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:26 crc kubenswrapper[4938]: healthz check failed Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.802635 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.820644 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:26 crc kubenswrapper[4938]: E1122 10:40:26.820991 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:27.32097838 +0000 UTC m=+159.788815779 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.822165 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.844175 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-rgbsk"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.848429 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vst8c"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.894084 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-8rr8d"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.898244 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.922243 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:26 crc kubenswrapper[4938]: E1122 10:40:26.925075 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-22 10:40:27.425030194 +0000 UTC m=+159.892867613 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.937429 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-p4glz" podStartSLOduration=134.9374117 podStartE2EDuration="2m14.9374117s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:26.937179634 +0000 UTC m=+159.405017033" watchObservedRunningTime="2025-11-22 10:40:26.9374117 +0000 UTC m=+159.405249099" Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.938787 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.969599 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp"] Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.971394 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:40:26 crc kubenswrapper[4938]: I1122 10:40:26.998856 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8"] Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.006323 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-mh9ds"] Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.015189 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjq2z" podStartSLOduration=135.015171893 podStartE2EDuration="2m15.015171893s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.010213513 +0000 UTC m=+159.478050912" watchObservedRunningTime="2025-11-22 10:40:27.015171893 +0000 UTC m=+159.483009292" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.017241 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs"] Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.028278 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:27 crc kubenswrapper[4938]: E1122 10:40:27.028556 4938 nestedpendingoperations.go:348] 
Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:27.528545264 +0000 UTC m=+159.996382663 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.030967 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-dd667"] Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.037036 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-jg5zd"] Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.051370 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n"] Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.054575 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-rqzh7"] Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.055629 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" podStartSLOduration=135.055608336 podStartE2EDuration="2m15.055608336s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.046027294 +0000 UTC m=+159.513864693" watchObservedRunningTime="2025-11-22 10:40:27.055608336 +0000 UTC m=+159.523445735" Nov 22 10:40:27 crc kubenswrapper[4938]: W1122 10:40:27.068970 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c908925_144c_4a75_a9c4_35c2b585db68.slice/crio-cf1b892fc4cfebcbd50a48712228db9b809b78c271b9c3c756915f9e3690f61a WatchSource:0}: Error finding container cf1b892fc4cfebcbd50a48712228db9b809b78c271b9c3c756915f9e3690f61a: Status 404 returned error can't find the container with id cf1b892fc4cfebcbd50a48712228db9b809b78c271b9c3c756915f9e3690f61a Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.075929 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-6whgh" podStartSLOduration=135.075897459 podStartE2EDuration="2m15.075897459s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.070534168 +0000 UTC m=+159.538371567" watchObservedRunningTime="2025-11-22 10:40:27.075897459 +0000 UTC m=+159.543734858" Nov 22 10:40:27 crc kubenswrapper[4938]: W1122 10:40:27.091091 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44da01b3_b33a_402b_9bc1_ceea816d801b.slice/crio-96363501391cb2f457d1b5cf27b67890d00e7ecc02f570d1370b502d541f6cc9 WatchSource:0}: Error finding container 
96363501391cb2f457d1b5cf27b67890d00e7ecc02f570d1370b502d541f6cc9: Status 404 returned error can't find the container with id 96363501391cb2f457d1b5cf27b67890d00e7ecc02f570d1370b502d541f6cc9 Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.092723 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-l4rgm" podStartSLOduration=135.09270511 podStartE2EDuration="2m15.09270511s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.088595332 +0000 UTC m=+159.556432731" watchObservedRunningTime="2025-11-22 10:40:27.09270511 +0000 UTC m=+159.560542509" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.115845 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" podStartSLOduration=135.115825318 podStartE2EDuration="2m15.115825318s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.113708792 +0000 UTC m=+159.581546191" watchObservedRunningTime="2025-11-22 10:40:27.115825318 +0000 UTC m=+159.583662717" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.132621 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:27 crc kubenswrapper[4938]: E1122 10:40:27.132887 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:27.632872686 +0000 UTC m=+160.100710085 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.136993 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-z795b" podStartSLOduration=135.136976174 podStartE2EDuration="2m15.136976174s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.136506321 +0000 UTC m=+159.604343720" watchObservedRunningTime="2025-11-22 10:40:27.136976174 +0000 UTC m=+159.604813573" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.234565 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:27 crc kubenswrapper[4938]: E1122 10:40:27.234984 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:27.734968509 +0000 UTC m=+160.202805908 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.335698 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:27 crc kubenswrapper[4938]: E1122 10:40:27.336329 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:27.836313572 +0000 UTC m=+160.304150971 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.437757 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:27 crc kubenswrapper[4938]: E1122 10:40:27.438824 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:27.938807745 +0000 UTC m=+160.406645144 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.541396 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:27 crc kubenswrapper[4938]: E1122 10:40:27.541524 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:28.041499363 +0000 UTC m=+160.509336752 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.541881 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:27 crc kubenswrapper[4938]: E1122 10:40:27.544854 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:28.044844351 +0000 UTC m=+160.512681750 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.551568 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" event={"ID":"2c5a6974-f26d-422a-9fb9-dcc8968fb1f2","Type":"ContainerStarted","Data":"6ba7ad59ed73cb300bba8a99c46390f61d445ba19136f46d908d4d96387f55aa"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.567538 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t" event={"ID":"2c89f2e6-7048-4496-9cb9-07320fb586c6","Type":"ContainerStarted","Data":"af69003a73c2993b33302674a0529e2666f502c0b9c710d7925551cd212642a0"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.567588 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t" event={"ID":"2c89f2e6-7048-4496-9cb9-07320fb586c6","Type":"ContainerStarted","Data":"55da46dd76b91c0da5df51ced8ff826276b932471d251986197fe9ad05531d99"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.576744 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-lhxtg" podStartSLOduration=136.576734539 podStartE2EDuration="2m16.576734539s" podCreationTimestamp="2025-11-22 10:38:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.576257297 +0000 UTC m=+160.044094696" watchObservedRunningTime="2025-11-22 10:40:27.576734539 +0000 UTC m=+160.044571938" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.596399 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" 
event={"ID":"44da01b3-b33a-402b-9bc1-ceea816d801b","Type":"ContainerStarted","Data":"96363501391cb2f457d1b5cf27b67890d00e7ecc02f570d1370b502d541f6cc9"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.600225 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n" event={"ID":"3d2cdfd7-9190-4322-81d6-cf73f4815c8c","Type":"ContainerStarted","Data":"c4c387ac52afcee0a5a76bfcd2e40c0a221140c5691ed1e23156ba1ca23c4d87"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.605638 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" event={"ID":"000e79cf-31f1-47f0-974c-4918f468ca74","Type":"ContainerStarted","Data":"4f99be4268bd5a6476b1e61af473ad7caf22fa2672f410ee328a9cb4a720b9b8"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.623834 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" event={"ID":"1c0c610d-d8dd-4aa7-a313-39685144ce31","Type":"ContainerStarted","Data":"706852f398bfbd1958a311cc21c99a989c3d42f496d9d73a696c328dab9742d6"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.623875 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" event={"ID":"1c0c610d-d8dd-4aa7-a313-39685144ce31","Type":"ContainerStarted","Data":"92e333793410fff4068bafd1400e51cf00168524d9c220c4c7c173a9b18d9407"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.626018 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-mh9ds" event={"ID":"ca3f486a-1534-437f-8b98-03f1304b4686","Type":"ContainerStarted","Data":"3b43730bd546bf0587058ddc2c782819a52683f8011eebb1046a845c21fb1a92"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.629111 4938 generic.go:334] "Generic (PLEG): container finished" podID="9a34102a-5b15-4d64-9ca6-d565af874df5" containerID="1b18563f776bf92ef6c94f35d2d2fd27f9f134adbffa8bb9a6422911698beb89" exitCode=0 Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.629157 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" event={"ID":"9a34102a-5b15-4d64-9ca6-d565af874df5","Type":"ContainerDied","Data":"1b18563f776bf92ef6c94f35d2d2fd27f9f134adbffa8bb9a6422911698beb89"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.629174 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" event={"ID":"9a34102a-5b15-4d64-9ca6-d565af874df5","Type":"ContainerStarted","Data":"43e4870664fe1d5750dfc0ba2bfdca3da4dd7928040a9467004c270ff60a3e63"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.632232 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj" event={"ID":"1c908925-144c-4a75-a9c4-35c2b585db68","Type":"ContainerStarted","Data":"cf1b892fc4cfebcbd50a48712228db9b809b78c271b9c3c756915f9e3690f61a"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.637704 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" event={"ID":"f2634903-a7f8-4114-b3d3-f902eb3df5ee","Type":"ContainerStarted","Data":"6e6ffcb703d041ce4916ea870d28c255f7ddf6378ad63a45ad2813a1a1833fc5"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.639755 4938 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" event={"ID":"d90bb3f2-72ce-41fa-b865-8892a4b70c06","Type":"ContainerStarted","Data":"d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.639779 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" event={"ID":"d90bb3f2-72ce-41fa-b865-8892a4b70c06","Type":"ContainerStarted","Data":"d7bc6f7adcf937d4a6888b17a1c1512123bef9b5af6f6e162632b15bb1d88eac"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.640015 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.641483 4938 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-88scl container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.641532 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" podUID="d90bb3f2-72ce-41fa-b865-8892a4b70c06" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.642846 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:27 crc kubenswrapper[4938]: E1122 10:40:27.644138 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:28.1441242 +0000 UTC m=+160.611961599 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.659116 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" podStartSLOduration=135.659097024 podStartE2EDuration="2m15.659097024s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.658785485 +0000 UTC m=+160.126622884" watchObservedRunningTime="2025-11-22 10:40:27.659097024 +0000 UTC m=+160.126934423" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.673817 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-29bgm" event={"ID":"e7a859c1-d7af-4cea-aad2-c2ebb92e9d16","Type":"ContainerStarted","Data":"f3224dba589547f349c3a8d382236555d9dd5576dba74a5aaf9c703c1975a084"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.675426 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-8rr8d" event={"ID":"648f33c1-89ac-4734-af21-403c7270db09","Type":"ContainerStarted","Data":"83e9e0bcca22358f9332b0f3a583f411da74df058c3ebfb94910c5653bfee499"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.677428 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" event={"ID":"a0ba3027-3c7b-479f-be9f-ac471151ec8a","Type":"ContainerStarted","Data":"41c7302a469e25c491ea6bb41bc5db0f2a358ee86294038a615a66cfc62c27ba"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.677586 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" event={"ID":"a0ba3027-3c7b-479f-be9f-ac471151ec8a","Type":"ContainerStarted","Data":"9dcfb0c9019d8dfa351e2c509b795ffb42a79c3278ea7b9298bae6b951b6adb9"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.680727 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8" event={"ID":"2e471541-c937-41e6-9dd8-a32b167f8adf","Type":"ContainerStarted","Data":"064ca84f8fcd887e64df3aa5ac029ab555e326ec34743eb1cf70335fd609d9ab"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.682264 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" event={"ID":"cb0610d9-7370-438c-a65a-db457f13d8be","Type":"ContainerStarted","Data":"f16ab4dd829862769d2c118501cb77c3d8f637f5b4f843ea74911397f1b63412"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.682311 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" event={"ID":"cb0610d9-7370-438c-a65a-db457f13d8be","Type":"ContainerStarted","Data":"2610dbcfc966e3b75a403448a2f20031b2ab35151a7e9e213085dd45a43b3719"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.682641 4938 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.686395 4938 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-ggzxq container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused" start-of-body= Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.686436 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" podUID="cb0610d9-7370-438c-a65a-db457f13d8be" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.687806 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" event={"ID":"87fd6986-2f7f-4c25-bb26-0016630d173c","Type":"ContainerStarted","Data":"e4c8b2e6ddce032967b028769a08e56e01926355e40ee0c6f21d36a5ebdf1adc"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.694400 4938 generic.go:334] "Generic (PLEG): container finished" podID="2de462e6-eb84-4e7e-904e-5d303e8ffc17" containerID="26ededd9d333ac9ec91172634fe3eb56e77ca8e999bcb9394c22c23c0a389367" exitCode=0 Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.694478 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" event={"ID":"2de462e6-eb84-4e7e-904e-5d303e8ffc17","Type":"ContainerDied","Data":"26ededd9d333ac9ec91172634fe3eb56e77ca8e999bcb9394c22c23c0a389367"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.705369 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-29bgm" podStartSLOduration=135.705348489 podStartE2EDuration="2m15.705348489s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.704371853 +0000 UTC m=+160.172209252" watchObservedRunningTime="2025-11-22 10:40:27.705348489 +0000 UTC m=+160.173185898" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.720296 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" event={"ID":"64767c08-8bde-4744-b0dd-e1629fd6e349","Type":"ContainerStarted","Data":"5ed2c2462f5e54ec67952dad7397b477f5cf951cea9f98b7a5b01d63a84adb46"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.725175 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" event={"ID":"311f1063-1ead-4575-adce-cbf298b713b0","Type":"ContainerStarted","Data":"f02ff52c9e04d22ee09987b0322f2cee2ef2a6a72f032105f056d2550a7bdf04"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.733271 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6" event={"ID":"dcb746ed-5e3a-4c9e-9416-60a033f9035d","Type":"ContainerStarted","Data":"9d14e0816c6cfc98267f4b57533d7e0839d14ab8948f8a9bb6170f93010fe7f3"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.733407 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6" event={"ID":"dcb746ed-5e3a-4c9e-9416-60a033f9035d","Type":"ContainerStarted","Data":"ade2553e0a96c16a9d9ff9db70e5e1a2cf236d1655305289d7e106e461b9caa5"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.753426 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" event={"ID":"fd877f2f-683c-4734-bd35-df9891a505df","Type":"ContainerStarted","Data":"21209fc9f3b8e0f57ce8f388f547e3e6cfb5dcea7d9961b87f5d60ea75537cda"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.755362 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:27 crc kubenswrapper[4938]: E1122 10:40:27.755723 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:28.255711462 +0000 UTC m=+160.723548851 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.765021 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" podStartSLOduration=135.764986336 podStartE2EDuration="2m15.764986336s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.733737205 +0000 UTC m=+160.201574604" watchObservedRunningTime="2025-11-22 10:40:27.764986336 +0000 UTC m=+160.232823735" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.779620 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp" event={"ID":"f3b1138e-f11b-478a-b955-7737ac63dc31","Type":"ContainerStarted","Data":"ceea416253f2c80be40f2e99295b8bcb3fc9cac2d90de23761135bc1a3ad8f0c"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.800673 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:40:27 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld Nov 22 10:40:27 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:27 crc kubenswrapper[4938]: healthz check failed Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.800714 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" 
containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.807803 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-xb74c" podStartSLOduration=135.80776691 podStartE2EDuration="2m15.80776691s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.806739853 +0000 UTC m=+160.274577252" watchObservedRunningTime="2025-11-22 10:40:27.80776691 +0000 UTC m=+160.275604309" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.813837 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-dd667" event={"ID":"825e96a5-8bdd-425f-9d92-461eded30ae3","Type":"ContainerStarted","Data":"ee098939b2fbc838d953689d517951d008b5bce49bd0640e17563ecb4aaffd0f"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.826979 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" event={"ID":"d0d89759-92ad-4ff1-8ab9-7da4338c7148","Type":"ContainerStarted","Data":"718c085dd027199ddcf965c4f0356b013088f59de7c6c4057c968a07342faadf"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.827015 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" event={"ID":"d0d89759-92ad-4ff1-8ab9-7da4338c7148","Type":"ContainerStarted","Data":"a516686563f524c9ab9686511ed49af1e5aaee310effc6239aa187afa46dc3f9"} Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.831010 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.836496 4938 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-lr8gp container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" start-of-body= Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.837400 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" podUID="d0d89759-92ad-4ff1-8ab9-7da4338c7148" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.868032 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qskvn" podStartSLOduration=135.868007243 podStartE2EDuration="2m15.868007243s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.824749546 +0000 UTC m=+160.292586945" watchObservedRunningTime="2025-11-22 10:40:27.868007243 +0000 UTC m=+160.335844642" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.871647 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" podStartSLOduration=135.871625828 podStartE2EDuration="2m15.871625828s" 
podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:27.867334675 +0000 UTC m=+160.335172074" watchObservedRunningTime="2025-11-22 10:40:27.871625828 +0000 UTC m=+160.339463227" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.872977 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:27 crc kubenswrapper[4938]: E1122 10:40:27.875327 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:28.375307885 +0000 UTC m=+160.843145284 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.942405 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:40:27 crc kubenswrapper[4938]: I1122 10:40:27.975945 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:27 crc kubenswrapper[4938]: E1122 10:40:27.976227 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:28.476214377 +0000 UTC m=+160.944051776 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.791898 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:40:28 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld Nov 22 10:40:28 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:28 crc kubenswrapper[4938]: healthz check failed Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.791958 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.862485 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" event={"ID":"1c0c610d-d8dd-4aa7-a313-39685144ce31","Type":"ContainerStarted","Data":"7580f08660f3d927b9b26ade27a8a6bb733c18271aa16684f57aa8f75a33d99c"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.864186 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp" event={"ID":"f3b1138e-f11b-478a-b955-7737ac63dc31","Type":"ContainerStarted","Data":"b109a6278601400a69be3fa2c52a890d71a95277b1deee34604764c671f3625b"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.870207 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj" event={"ID":"1c908925-144c-4a75-a9c4-35c2b585db68","Type":"ContainerStarted","Data":"4e72a9b5042ff2c83af3e7ce16edd0058526fc072d9004bc223b280fb0c9ae5d"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.894460 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:28 crc kubenswrapper[4938]: E1122 10:40:28.894579 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:29.394556207 +0000 UTC m=+161.862393606 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.894902 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:28 crc kubenswrapper[4938]: E1122 10:40:28.897258 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:29.397245528 +0000 UTC m=+161.865082927 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.900755 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n" event={"ID":"3d2cdfd7-9190-4322-81d6-cf73f4815c8c","Type":"ContainerStarted","Data":"bfaf8526044e1feb6345834ea24d8521f5977f572a78d06001135fbcd9f73a02"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.908000 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" event={"ID":"2de462e6-eb84-4e7e-904e-5d303e8ffc17","Type":"ContainerStarted","Data":"0ef5176dde26676d1a5f408ca6a7a6e10384818f25701b494162016da5c16d08"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.909646 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-mh9ds" event={"ID":"ca3f486a-1534-437f-8b98-03f1304b4686","Type":"ContainerStarted","Data":"e09d22f1d9ee58e032bbd45e7223c1ab6228a951444951e8335f3c011b84c19c"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.909671 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-mh9ds" event={"ID":"ca3f486a-1534-437f-8b98-03f1304b4686","Type":"ContainerStarted","Data":"d682024265b28f41214b9eb74cbfb38ef4cb3164d916fb2675f373ba13aedbee"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.911515 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" event={"ID":"9a34102a-5b15-4d64-9ca6-d565af874df5","Type":"ContainerStarted","Data":"2426240f8108e86e8d573b0c7858d52989aad36e980ea96de64698974651095c"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.918264 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" event={"ID":"311f1063-1ead-4575-adce-cbf298b713b0","Type":"ContainerStarted","Data":"68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.918867 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.919739 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8" event={"ID":"2e471541-c937-41e6-9dd8-a32b167f8adf","Type":"ContainerStarted","Data":"2fcea94410fa8ebc5e7be2f9245f112f81ec2990d14dccfeca8c8f7863c91c27"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.920201 4938 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-jg5zd container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body= Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.920281 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" podUID="311f1063-1ead-4575-adce-cbf298b713b0" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.921505 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" event={"ID":"f2634903-a7f8-4114-b3d3-f902eb3df5ee","Type":"ContainerStarted","Data":"21cafc4171ba329aa7172b7a76a9c4a2ac3ef5c03522b2e02eb27615bb42ae0a"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.922445 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.925665 4938 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-7mhhs container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.925710 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" podUID="f2634903-a7f8-4114-b3d3-f902eb3df5ee" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.932630 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-dd667" event={"ID":"825e96a5-8bdd-425f-9d92-461eded30ae3","Type":"ContainerStarted","Data":"faf2a0623e9546b07e59edc5336f673572dd8da159d289241cde9cb0ae3b5e05"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.932669 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-dd667" event={"ID":"825e96a5-8bdd-425f-9d92-461eded30ae3","Type":"ContainerStarted","Data":"a42007344ca7a033b9bdc53b1e5b6db0c11c590138143a5473c90c2f4928218c"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.932689 4938 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-dns/dns-default-dd667" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.934447 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" event={"ID":"44da01b3-b33a-402b-9bc1-ceea816d801b","Type":"ContainerStarted","Data":"abe3a9b52cda5d993ae22b19faf0215bc31f67e7b9f949935aed193f671a9213"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.934504 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" event={"ID":"44da01b3-b33a-402b-9bc1-ceea816d801b","Type":"ContainerStarted","Data":"5ac2e342a1cc9ad318f34fb2137224c882ffca1aed0473173a90bf3e44a8aa5f"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.944108 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-97pbg" podStartSLOduration=136.944070358 podStartE2EDuration="2m16.944070358s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:28.925854459 +0000 UTC m=+161.393691858" watchObservedRunningTime="2025-11-22 10:40:28.944070358 +0000 UTC m=+161.411907757" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.950629 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6" event={"ID":"dcb746ed-5e3a-4c9e-9416-60a033f9035d","Type":"ContainerStarted","Data":"fef4b74a2227e04ecc9880aa70eae0d504a723f8ee75014906a245c6cefb7644"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.951358 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.955420 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" event={"ID":"fd877f2f-683c-4734-bd35-df9891a505df","Type":"ContainerStarted","Data":"7a6c6ed35cb2a12741c6a67ceb6e6279212ee1a2226abc98f3afab7b681604d6"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.959899 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-8rr8d" event={"ID":"648f33c1-89ac-4734-af21-403c7270db09","Type":"ContainerStarted","Data":"ce313e8cf071ecf970348a93135b8f3c540a3e5adaa83ab6c29d621a30176b09"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.964077 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hs2zp" podStartSLOduration=136.964061563 podStartE2EDuration="2m16.964061563s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:28.943860263 +0000 UTC m=+161.411697662" watchObservedRunningTime="2025-11-22 10:40:28.964061563 +0000 UTC m=+161.431898992" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.964161 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-nghqj" podStartSLOduration=136.964158156 podStartE2EDuration="2m16.964158156s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:28.963429717 +0000 UTC m=+161.431267116" watchObservedRunningTime="2025-11-22 10:40:28.964158156 +0000 UTC m=+161.431995555" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.967012 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" event={"ID":"000e79cf-31f1-47f0-974c-4918f468ca74","Type":"ContainerStarted","Data":"f72b57b4e07d8fa4e30b2242fe9318a6b59863df4bfe0b59442139a2a0a54f80"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.967069 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" event={"ID":"000e79cf-31f1-47f0-974c-4918f468ca74","Type":"ContainerStarted","Data":"2d2ddfa0f0edbd725446624f3a1f8951d63eacdc31fbdc3bf578d5da5e502832"} Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.968466 4938 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-88scl container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.968518 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" podUID="d90bb3f2-72ce-41fa-b865-8892a4b70c06" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.968605 4938 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-ggzxq container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused" start-of-body= Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.968888 4938 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-lr8gp container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" start-of-body= Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.968985 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" podUID="d0d89759-92ad-4ff1-8ab9-7da4338c7148" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.969185 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq" podUID="cb0610d9-7370-438c-a65a-db457f13d8be" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.986303 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g9gg8" podStartSLOduration=136.986287178 podStartE2EDuration="2m16.986287178s" 
podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:28.983475704 +0000 UTC m=+161.451313113" watchObservedRunningTime="2025-11-22 10:40:28.986287178 +0000 UTC m=+161.454124577" Nov 22 10:40:28 crc kubenswrapper[4938]: I1122 10:40:28.999590 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:29 crc kubenswrapper[4938]: E1122 10:40:28.999983 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:29.499964147 +0000 UTC m=+161.967801546 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.000754 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" podStartSLOduration=137.000733447 podStartE2EDuration="2m17.000733447s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:28.99741202 +0000 UTC m=+161.465249419" watchObservedRunningTime="2025-11-22 10:40:29.000733447 +0000 UTC m=+161.468570846" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.031980 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" podStartSLOduration=137.031810474 podStartE2EDuration="2m17.031810474s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:29.030105229 +0000 UTC m=+161.497942628" watchObservedRunningTime="2025-11-22 10:40:29.031810474 +0000 UTC m=+161.499647873" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.032351 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-8rr8d" podStartSLOduration=9.032345528 podStartE2EDuration="9.032345528s" podCreationTimestamp="2025-11-22 10:40:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:29.015374452 +0000 UTC m=+161.483211851" watchObservedRunningTime="2025-11-22 10:40:29.032345528 +0000 UTC m=+161.500182927" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.046772 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6" 
podStartSLOduration=137.046752226 podStartE2EDuration="2m17.046752226s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:29.045897844 +0000 UTC m=+161.513735243" watchObservedRunningTime="2025-11-22 10:40:29.046752226 +0000 UTC m=+161.514589635" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.060483 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-mh9ds" podStartSLOduration=137.060463917 podStartE2EDuration="2m17.060463917s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:29.059095761 +0000 UTC m=+161.526933180" watchObservedRunningTime="2025-11-22 10:40:29.060463917 +0000 UTC m=+161.528301326" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.089227 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.104036 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:29 crc kubenswrapper[4938]: E1122 10:40:29.106204 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:29.606182098 +0000 UTC m=+162.074019487 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.114622 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" podStartSLOduration=137.114588489 podStartE2EDuration="2m17.114588489s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:29.085763861 +0000 UTC m=+161.553601260" watchObservedRunningTime="2025-11-22 10:40:29.114588489 +0000 UTC m=+161.582425888" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.115463 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" podStartSLOduration=137.115454942 podStartE2EDuration="2m17.115454942s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:29.109761922 +0000 UTC m=+161.577599321" watchObservedRunningTime="2025-11-22 10:40:29.115454942 +0000 UTC m=+161.583292341" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.127793 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-vst8c" podStartSLOduration=137.127762785 podStartE2EDuration="2m17.127762785s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:29.12604852 +0000 UTC m=+161.593885919" watchObservedRunningTime="2025-11-22 10:40:29.127762785 +0000 UTC m=+161.595600184" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.152254 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n" podStartSLOduration=137.152228938 podStartE2EDuration="2m17.152228938s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:29.144575847 +0000 UTC m=+161.612413246" watchObservedRunningTime="2025-11-22 10:40:29.152228938 +0000 UTC m=+161.620066357" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.196271 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-dd667" podStartSLOduration=9.196251155 podStartE2EDuration="9.196251155s" podCreationTimestamp="2025-11-22 10:40:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:29.17855586 +0000 UTC m=+161.646393259" watchObservedRunningTime="2025-11-22 10:40:29.196251155 +0000 UTC m=+161.664088554" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.197128 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-machine-api/machine-api-operator-5694c8668f-rqzh7" podStartSLOduration=137.197119268 podStartE2EDuration="2m17.197119268s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:29.195858464 +0000 UTC m=+161.663695863" watchObservedRunningTime="2025-11-22 10:40:29.197119268 +0000 UTC m=+161.664956667" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.205666 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:29 crc kubenswrapper[4938]: E1122 10:40:29.205837 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:29.705813846 +0000 UTC m=+162.173651245 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.206106 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:29 crc kubenswrapper[4938]: E1122 10:40:29.206592 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:29.706584766 +0000 UTC m=+162.174422165 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.221082 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wsz8t" podStartSLOduration=137.221063437 podStartE2EDuration="2m17.221063437s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:29.220706227 +0000 UTC m=+161.688543626" watchObservedRunningTime="2025-11-22 10:40:29.221063437 +0000 UTC m=+161.688900836" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.252537 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-k7dtf" podStartSLOduration=137.252522383 podStartE2EDuration="2m17.252522383s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:29.246101555 +0000 UTC m=+161.713938954" watchObservedRunningTime="2025-11-22 10:40:29.252522383 +0000 UTC m=+161.720359782" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.307331 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:29 crc kubenswrapper[4938]: E1122 10:40:29.307558 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:29.807543979 +0000 UTC m=+162.275381378 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.713972 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:29 crc kubenswrapper[4938]: E1122 10:40:29.714299 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:30.214286827 +0000 UTC m=+162.682124226 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.726436 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.726482 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.728122 4938 patch_prober.go:28] interesting pod/apiserver-76f77b778f-fk4l7 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.728183 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7" podUID="9a34102a-5b15-4d64-9ca6-d565af874df5" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.773479 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.773532 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.775003 4938 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-999ph container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Nov 22 10:40:29 
crc kubenswrapper[4938]: I1122 10:40:29.775048 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph" podUID="2de462e6-eb84-4e7e-904e-5d303e8ffc17" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.786124 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:40:29 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld Nov 22 10:40:29 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:29 crc kubenswrapper[4938]: healthz check failed Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.786169 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.814796 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:29 crc kubenswrapper[4938]: E1122 10:40:29.814994 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:30.314964353 +0000 UTC m=+162.782801742 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.815150 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:29 crc kubenswrapper[4938]: E1122 10:40:29.815452 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:30.315444455 +0000 UTC m=+162.783281854 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.916572 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:29 crc kubenswrapper[4938]: E1122 10:40:29.916717 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:30.416690296 +0000 UTC m=+162.884527695 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.916900 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:29 crc kubenswrapper[4938]: E1122 10:40:29.917180 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:30.417172779 +0000 UTC m=+162.885010178 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.974426 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" event={"ID":"87fd6986-2f7f-4c25-bb26-0016630d173c","Type":"ContainerStarted","Data":"c8bdaef700257e26ff315c4bdc6022bb4517546a5c06a0a10b6a42761600b8a7"} Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.974995 4938 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-jg5zd container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body= Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.975319 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" podUID="311f1063-1ead-4575-adce-cbf298b713b0" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" Nov 22 10:40:29 crc kubenswrapper[4938]: I1122 10:40:29.995655 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7mhhs" Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.018023 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:30 crc kubenswrapper[4938]: E1122 10:40:30.019103 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:30.519070636 +0000 UTC m=+162.986908035 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.120165 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:30 crc kubenswrapper[4938]: E1122 10:40:30.120769 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:30.620730848 +0000 UTC m=+163.088568247 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.221281 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:30 crc kubenswrapper[4938]: E1122 10:40:30.221470 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:30.721443104 +0000 UTC m=+163.189280503 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.278803 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-lr8gp" Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.323347 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:30 crc kubenswrapper[4938]: E1122 10:40:30.323794 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:30.823774063 +0000 UTC m=+163.291611572 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.425132 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:30 crc kubenswrapper[4938]: E1122 10:40:30.444848 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:30.944818224 +0000 UTC m=+163.412655623 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.527690 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:30 crc kubenswrapper[4938]: E1122 10:40:30.528114 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:31.028077561 +0000 UTC m=+163.495914960 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.628542 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:30 crc kubenswrapper[4938]: E1122 10:40:30.628856 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:31.128841929 +0000 UTC m=+163.596679318 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.651529 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.652261 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.669872 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.669901 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.691488 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.729898 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:30 crc kubenswrapper[4938]: E1122 10:40:30.730237 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:31.230222653 +0000 UTC m=+163.698060052 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.730292 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e2545656-f2d7-49a3-bcdf-52cd837b2a25-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e2545656-f2d7-49a3-bcdf-52cd837b2a25\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.730330 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e2545656-f2d7-49a3-bcdf-52cd837b2a25-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e2545656-f2d7-49a3-bcdf-52cd837b2a25\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.784560 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:40:30 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld Nov 22 10:40:30 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:30 crc kubenswrapper[4938]: healthz check failed Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.784606 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe 
failed with statuscode: 500" Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.831153 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.831394 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e2545656-f2d7-49a3-bcdf-52cd837b2a25-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e2545656-f2d7-49a3-bcdf-52cd837b2a25\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.831428 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e2545656-f2d7-49a3-bcdf-52cd837b2a25-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e2545656-f2d7-49a3-bcdf-52cd837b2a25\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.831483 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e2545656-f2d7-49a3-bcdf-52cd837b2a25-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e2545656-f2d7-49a3-bcdf-52cd837b2a25\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 10:40:30 crc kubenswrapper[4938]: E1122 10:40:30.831547 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:31.331533236 +0000 UTC m=+163.799370635 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.870759 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e2545656-f2d7-49a3-bcdf-52cd837b2a25-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e2545656-f2d7-49a3-bcdf-52cd837b2a25\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.932180 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:30 crc kubenswrapper[4938]: E1122 10:40:30.932517 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-22 10:40:31.432504859 +0000 UTC m=+163.900342258 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:30 crc kubenswrapper[4938]: I1122 10:40:30.967203 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.001895 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" event={"ID":"87fd6986-2f7f-4c25-bb26-0016630d173c","Type":"ContainerStarted","Data":"596f2642aaa95980e699ba41ca15560cc45008f2c7699324dcf3d07f11fbe24a"} Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.003534 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.033100 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:31 crc kubenswrapper[4938]: E1122 10:40:31.034178 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:31.53416407 +0000 UTC m=+164.002001469 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.134762 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:31 crc kubenswrapper[4938]: E1122 10:40:31.135321 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:31.635305508 +0000 UTC m=+164.103142907 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.235599 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:31 crc kubenswrapper[4938]: E1122 10:40:31.235847 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:31.735816719 +0000 UTC m=+164.203654118 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.287519 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tqbjm"] Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.288649 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.293550 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.314288 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tqbjm"] Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.337400 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4381753c-a419-4f63-acc0-6b1bc5529b75-utilities\") pod \"community-operators-tqbjm\" (UID: \"4381753c-a419-4f63-acc0-6b1bc5529b75\") " pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.337469 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97p8p\" (UniqueName: \"kubernetes.io/projected/4381753c-a419-4f63-acc0-6b1bc5529b75-kube-api-access-97p8p\") pod \"community-operators-tqbjm\" (UID: \"4381753c-a419-4f63-acc0-6b1bc5529b75\") " pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.337503 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.337529 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4381753c-a419-4f63-acc0-6b1bc5529b75-catalog-content\") pod \"community-operators-tqbjm\" (UID: \"4381753c-a419-4f63-acc0-6b1bc5529b75\") " pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:40:31 crc kubenswrapper[4938]: E1122 10:40:31.337878 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:31.8378639 +0000 UTC m=+164.305701299 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.438232 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:31 crc kubenswrapper[4938]: E1122 10:40:31.438382 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:31.938353631 +0000 UTC m=+164.406191030 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.438530 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4381753c-a419-4f63-acc0-6b1bc5529b75-utilities\") pod \"community-operators-tqbjm\" (UID: \"4381753c-a419-4f63-acc0-6b1bc5529b75\") " pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.438571 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97p8p\" (UniqueName: \"kubernetes.io/projected/4381753c-a419-4f63-acc0-6b1bc5529b75-kube-api-access-97p8p\") pod \"community-operators-tqbjm\" (UID: \"4381753c-a419-4f63-acc0-6b1bc5529b75\") " pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.438592 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.438608 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4381753c-a419-4f63-acc0-6b1bc5529b75-catalog-content\") pod \"community-operators-tqbjm\" (UID: \"4381753c-a419-4f63-acc0-6b1bc5529b75\") " pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:40:31 crc kubenswrapper[4938]: E1122 10:40:31.438933 4938 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:31.938893305 +0000 UTC m=+164.406730704 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.439378 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4381753c-a419-4f63-acc0-6b1bc5529b75-catalog-content\") pod \"community-operators-tqbjm\" (UID: \"4381753c-a419-4f63-acc0-6b1bc5529b75\") " pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.439533 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4381753c-a419-4f63-acc0-6b1bc5529b75-utilities\") pod \"community-operators-tqbjm\" (UID: \"4381753c-a419-4f63-acc0-6b1bc5529b75\") " pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.455653 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jcz58"] Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.456731 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.462221 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.477047 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jcz58"] Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.539798 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:31 crc kubenswrapper[4938]: E1122 10:40:31.539956 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.03993477 +0000 UTC m=+164.507772169 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.540009 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdd9b\" (UniqueName: \"kubernetes.io/projected/372bd14a-9e2d-4320-92e4-534c44542975-kube-api-access-kdd9b\") pod \"certified-operators-jcz58\" (UID: \"372bd14a-9e2d-4320-92e4-534c44542975\") " pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.540053 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/372bd14a-9e2d-4320-92e4-534c44542975-utilities\") pod \"certified-operators-jcz58\" (UID: \"372bd14a-9e2d-4320-92e4-534c44542975\") " pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.540068 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/372bd14a-9e2d-4320-92e4-534c44542975-catalog-content\") pod \"certified-operators-jcz58\" (UID: \"372bd14a-9e2d-4320-92e4-534c44542975\") " pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.540172 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:31 crc kubenswrapper[4938]: E1122 10:40:31.540612 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.040592508 +0000 UTC m=+164.508429907 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.639990 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97p8p\" (UniqueName: \"kubernetes.io/projected/4381753c-a419-4f63-acc0-6b1bc5529b75-kube-api-access-97p8p\") pod \"community-operators-tqbjm\" (UID: \"4381753c-a419-4f63-acc0-6b1bc5529b75\") " pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.641673 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.641888 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdd9b\" (UniqueName: \"kubernetes.io/projected/372bd14a-9e2d-4320-92e4-534c44542975-kube-api-access-kdd9b\") pod \"certified-operators-jcz58\" (UID: \"372bd14a-9e2d-4320-92e4-534c44542975\") " pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.641966 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/372bd14a-9e2d-4320-92e4-534c44542975-utilities\") pod \"certified-operators-jcz58\" (UID: \"372bd14a-9e2d-4320-92e4-534c44542975\") " pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.641989 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/372bd14a-9e2d-4320-92e4-534c44542975-catalog-content\") pod \"certified-operators-jcz58\" (UID: \"372bd14a-9e2d-4320-92e4-534c44542975\") " pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:40:31 crc kubenswrapper[4938]: E1122 10:40:31.642370 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.142342691 +0000 UTC m=+164.610180120 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.650636 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gv775"] Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.651844 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gv775" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.663130 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gv775"] Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.691895 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/372bd14a-9e2d-4320-92e4-534c44542975-catalog-content\") pod \"certified-operators-jcz58\" (UID: \"372bd14a-9e2d-4320-92e4-534c44542975\") " pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.692221 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/372bd14a-9e2d-4320-92e4-534c44542975-utilities\") pod \"certified-operators-jcz58\" (UID: \"372bd14a-9e2d-4320-92e4-534c44542975\") " pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.696677 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdd9b\" (UniqueName: \"kubernetes.io/projected/372bd14a-9e2d-4320-92e4-534c44542975-kube-api-access-kdd9b\") pod \"certified-operators-jcz58\" (UID: \"372bd14a-9e2d-4320-92e4-534c44542975\") " pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.712287 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.742960 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08937d01-18fd-41a6-a44d-5cae36807fc7-utilities\") pod \"community-operators-gv775\" (UID: \"08937d01-18fd-41a6-a44d-5cae36807fc7\") " pod="openshift-marketplace/community-operators-gv775" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.743040 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.743069 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2s2z\" (UniqueName: \"kubernetes.io/projected/08937d01-18fd-41a6-a44d-5cae36807fc7-kube-api-access-d2s2z\") pod \"community-operators-gv775\" (UID: 
\"08937d01-18fd-41a6-a44d-5cae36807fc7\") " pod="openshift-marketplace/community-operators-gv775" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.743123 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08937d01-18fd-41a6-a44d-5cae36807fc7-catalog-content\") pod \"community-operators-gv775\" (UID: \"08937d01-18fd-41a6-a44d-5cae36807fc7\") " pod="openshift-marketplace/community-operators-gv775" Nov 22 10:40:31 crc kubenswrapper[4938]: E1122 10:40:31.743824 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.243810898 +0000 UTC m=+164.711648297 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.784668 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:40:31 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld Nov 22 10:40:31 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:31 crc kubenswrapper[4938]: healthz check failed Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.784718 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.846744 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.846927 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08937d01-18fd-41a6-a44d-5cae36807fc7-catalog-content\") pod \"community-operators-gv775\" (UID: \"08937d01-18fd-41a6-a44d-5cae36807fc7\") " pod="openshift-marketplace/community-operators-gv775" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.846984 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08937d01-18fd-41a6-a44d-5cae36807fc7-utilities\") pod \"community-operators-gv775\" (UID: \"08937d01-18fd-41a6-a44d-5cae36807fc7\") " pod="openshift-marketplace/community-operators-gv775" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.847031 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2s2z\" (UniqueName: 
\"kubernetes.io/projected/08937d01-18fd-41a6-a44d-5cae36807fc7-kube-api-access-d2s2z\") pod \"community-operators-gv775\" (UID: \"08937d01-18fd-41a6-a44d-5cae36807fc7\") " pod="openshift-marketplace/community-operators-gv775" Nov 22 10:40:31 crc kubenswrapper[4938]: E1122 10:40:31.847091 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.34705615 +0000 UTC m=+164.814893559 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.847460 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08937d01-18fd-41a6-a44d-5cae36807fc7-catalog-content\") pod \"community-operators-gv775\" (UID: \"08937d01-18fd-41a6-a44d-5cae36807fc7\") " pod="openshift-marketplace/community-operators-gv775" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.847605 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08937d01-18fd-41a6-a44d-5cae36807fc7-utilities\") pod \"community-operators-gv775\" (UID: \"08937d01-18fd-41a6-a44d-5cae36807fc7\") " pod="openshift-marketplace/community-operators-gv775" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.871134 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dh2qc"] Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.872605 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dh2qc" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.899649 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2s2z\" (UniqueName: \"kubernetes.io/projected/08937d01-18fd-41a6-a44d-5cae36807fc7-kube-api-access-d2s2z\") pod \"community-operators-gv775\" (UID: \"08937d01-18fd-41a6-a44d-5cae36807fc7\") " pod="openshift-marketplace/community-operators-gv775" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.904610 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dh2qc"] Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.916144 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.938281 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jcz58"
Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.949458 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbwdm\" (UniqueName: \"kubernetes.io/projected/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-kube-api-access-kbwdm\") pod \"certified-operators-dh2qc\" (UID: \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\") " pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.949498 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-catalog-content\") pod \"certified-operators-dh2qc\" (UID: \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\") " pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.949527 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-utilities\") pod \"certified-operators-dh2qc\" (UID: \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\") " pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.949556 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf"
Nov 22 10:40:31 crc kubenswrapper[4938]: E1122 10:40:31.949885 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.449868511 +0000 UTC m=+164.917705920 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:31 crc kubenswrapper[4938]: I1122 10:40:31.965227 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gv775"
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.052415 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.052637 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-utilities\") pod \"certified-operators-dh2qc\" (UID: \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\") " pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.052720 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-catalog-content\") pod \"certified-operators-dh2qc\" (UID: \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\") " pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.052737 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbwdm\" (UniqueName: \"kubernetes.io/projected/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-kube-api-access-kbwdm\") pod \"certified-operators-dh2qc\" (UID: \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\") " pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:40:32 crc kubenswrapper[4938]: E1122 10:40:32.053404 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.553384511 +0000 UTC m=+165.021221910 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.054211 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-utilities\") pod \"certified-operators-dh2qc\" (UID: \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\") " pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.054684 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-catalog-content\") pod \"certified-operators-dh2qc\" (UID: \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\") " pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.061939 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"e2545656-f2d7-49a3-bcdf-52cd837b2a25","Type":"ContainerStarted","Data":"d5213a6f6737c486bda9a672ee8cf2c5b0838e97b1dfcad35301449031e4bc87"}
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.086655 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbwdm\" (UniqueName: \"kubernetes.io/projected/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-kube-api-access-kbwdm\") pod \"certified-operators-dh2qc\" (UID: \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\") " pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.100531 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" event={"ID":"87fd6986-2f7f-4c25-bb26-0016630d173c","Type":"ContainerStarted","Data":"4b6bd15a3db5551504ce6a0ce7150e98fdf7a06835a089071d4f3246ff9c23e7"}
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.100580 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" event={"ID":"87fd6986-2f7f-4c25-bb26-0016630d173c","Type":"ContainerStarted","Data":"e1a552f3f2c216dd59d879d8f2b7dc1904adc22bb9d5352d52b5440e6a013ad3"}
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.133762 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-rgbsk" podStartSLOduration=12.133744503 podStartE2EDuration="12.133744503s" podCreationTimestamp="2025-11-22 10:40:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:32.133193619 +0000 UTC m=+164.601031018" watchObservedRunningTime="2025-11-22 10:40:32.133744503 +0000 UTC m=+164.601581902"
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.155488 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf"
Nov 22 10:40:32 crc kubenswrapper[4938]: E1122 10:40:32.157858 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.657845766 +0000 UTC m=+165.125683165 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.227226 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.246635 4938 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.260864 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:40:32 crc kubenswrapper[4938]: E1122 10:40:32.261399 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.761377367 +0000 UTC m=+165.229214766 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.339483 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tqbjm"]
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.366832 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf"
Nov 22 10:40:32 crc kubenswrapper[4938]: E1122 10:40:32.367215 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.867199428 +0000 UTC m=+165.335036827 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.470529 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:40:32 crc kubenswrapper[4938]: E1122 10:40:32.470708 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.970681637 +0000 UTC m=+165.438519036 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.471208 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf"
Nov 22 10:40:32 crc kubenswrapper[4938]: E1122 10:40:32.471591 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.97157467 +0000 UTC m=+165.439412069 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.508237 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gv775"]
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.574204 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:40:32 crc kubenswrapper[4938]: E1122 10:40:32.574746 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:33.074723481 +0000 UTC m=+165.542560880 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.678720 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf"
Nov 22 10:40:32 crc kubenswrapper[4938]: E1122 10:40:32.679027 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:40:33.179015501 +0000 UTC m=+165.646852900 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-zr5wf" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
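[editor's note] The repeating nestedpendingoperations.go:348 errors above show the kubelet's volume manager rate-limiting a failing operation: each failure closes a per-volume gate for 500ms (durationBeforeRetry), and the reconciler's next pass may only re-issue the MountDevice or TearDown once that deadline has passed. A minimal Go sketch of such a gate follows; the names (opGate, run) are illustrative only and are not the kubelet's actual implementation.

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // opGate tracks the earliest time a named operation may be retried,
    // mirroring the "No retries permitted until ..." behavior in the log.
    type opGate struct {
        notBefore map[string]time.Time
        delay     time.Duration
    }

    func newOpGate(delay time.Duration) *opGate {
        return &opGate{notBefore: map[string]time.Time{}, delay: delay}
    }

    // run executes op unless the gate is still closed; on failure it
    // re-arms the gate so the reconciler's next pass skips the operation.
    func (g *opGate) run(key string, op func() error) error {
        if until, ok := g.notBefore[key]; ok && time.Now().Before(until) {
            return fmt.Errorf("no retries permitted until %s", until.Format(time.RFC3339Nano))
        }
        if err := op(); err != nil {
            g.notBefore[key] = time.Now().Add(g.delay)
            return err
        }
        delete(g.notBefore, key)
        return nil
    }

    func main() {
        g := newOpGate(500 * time.Millisecond)
        mount := func() error { return errors.New("driver not found in the list of registered CSI drivers") }
        for i := 0; i < 3; i++ {
            fmt.Println(g.run("pvc-657094db", mount))
            time.Sleep(200 * time.Millisecond)
        }
    }

The point of the gate is visible in the log: the reconciler keeps re-listing the volume every pass, but the expensive CSI call is only re-attempted after each 500ms window expires.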
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.695461 4938 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-22T10:40:32.24665905Z","Handler":null,"Name":""}
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.702352 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jcz58"]
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.724766 4938 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.724805 4938 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.727755 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dh2qc"]
Nov 22 10:40:32 crc kubenswrapper[4938]: W1122 10:40:32.757255 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc3aa5b9_8485_4d6f_8deb_c3d3c10b58db.slice/crio-c059bfe008754534ac02459d8c8a3ff77f1f6187646c16fbdf7707ec1456f165 WatchSource:0}: Error finding container c059bfe008754534ac02459d8c8a3ff77f1f6187646c16fbdf7707ec1456f165: Status 404 returned error can't find the container with id c059bfe008754534ac02459d8c8a3ff77f1f6187646c16fbdf7707ec1456f165
Nov 22 10:40:32 crc kubenswrapper[4938]: W1122 10:40:32.761039 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod372bd14a_9e2d_4320_92e4_534c44542975.slice/crio-c5e5587e5d055d495540daf404d72586ab15443a5d19b70d39af03a8672c0ac9 WatchSource:0}: Error finding container c5e5587e5d055d495540daf404d72586ab15443a5d19b70d39af03a8672c0ac9: Status 404 returned error can't find the container with id c5e5587e5d055d495540daf404d72586ab15443a5d19b70d39af03a8672c0ac9
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.780015 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.786409 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
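[editor's note] The registration sequence above (plugin_watcher adding the socket, OperationExecutor.RegisterPlugin, csi_plugin validating and registering kubevirt.io.hostpath-provisioner) is what unblocks the stuck mounts: the kubelet notices a new UNIX socket under /var/lib/kubelet/plugins_registry and registers the driver it advertises. A rough sketch of the watch half, assuming the fsnotify library (which the kubelet's plugin watcher is built on); the gRPC GetInfo handshake that follows registration is omitted.

    package main

    import (
        "fmt"
        "log"
        "strings"

        "github.com/fsnotify/fsnotify"
    )

    func main() {
        const registry = "/var/lib/kubelet/plugins_registry"

        w, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer w.Close()

        if err := w.Add(registry); err != nil {
            log.Fatal(err)
        }

        for {
            select {
            case ev := <-w.Events:
                // A newly created *.sock is a plugin announcing itself; the real
                // kubelet then dials the socket and runs the Registration handshake.
                if ev.Op&fsnotify.Create != 0 && strings.HasSuffix(ev.Name, ".sock") {
                    fmt.Println("adding socket path to desired state cache:", ev.Name)
                }
            case err := <-w.Errors:
                log.Println("watch error:", err)
            }
        }
    }

Once the driver is in the registered list, the very next reconciler pass can build a CSI client for it, which is why the MountDevice at 10:40:32.889805 finally succeeds.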
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.789810 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:40:32 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld Nov 22 10:40:32 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:32 crc kubenswrapper[4938]: healthz check failed Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.789860 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.881046 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.889768 4938 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.889805 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:32 crc kubenswrapper[4938]: I1122 10:40:32.933602 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-zr5wf\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.051561 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.105662 4938 generic.go:334] "Generic (PLEG): container finished" podID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" containerID="d8f51ff5112981c09bf35da99ab36affda94932cb026dd13dbf59279f7c4ca19" exitCode=0 Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.105722 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dh2qc" event={"ID":"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db","Type":"ContainerDied","Data":"d8f51ff5112981c09bf35da99ab36affda94932cb026dd13dbf59279f7c4ca19"} Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.105761 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dh2qc" event={"ID":"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db","Type":"ContainerStarted","Data":"c059bfe008754534ac02459d8c8a3ff77f1f6187646c16fbdf7707ec1456f165"} Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.107799 4938 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.110041 4938 generic.go:334] "Generic (PLEG): container finished" podID="372bd14a-9e2d-4320-92e4-534c44542975" containerID="7dfd65bbe0fa6925e5cd72a4d1dfffa304e456fb5544afb12592bf1fe808b0c4" exitCode=0 Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.110253 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcz58" event={"ID":"372bd14a-9e2d-4320-92e4-534c44542975","Type":"ContainerDied","Data":"7dfd65bbe0fa6925e5cd72a4d1dfffa304e456fb5544afb12592bf1fe808b0c4"} Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.110285 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcz58" event={"ID":"372bd14a-9e2d-4320-92e4-534c44542975","Type":"ContainerStarted","Data":"c5e5587e5d055d495540daf404d72586ab15443a5d19b70d39af03a8672c0ac9"} Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.115016 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2545656-f2d7-49a3-bcdf-52cd837b2a25" containerID="48858facce85c5b5ec7c6aa8eb78704b153f0a0e8a32f07cf0db394fa7f42fdc" exitCode=0 Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.115061 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"e2545656-f2d7-49a3-bcdf-52cd837b2a25","Type":"ContainerDied","Data":"48858facce85c5b5ec7c6aa8eb78704b153f0a0e8a32f07cf0db394fa7f42fdc"} Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.122672 4938 generic.go:334] "Generic (PLEG): container finished" podID="3d2cdfd7-9190-4322-81d6-cf73f4815c8c" containerID="bfaf8526044e1feb6345834ea24d8521f5977f572a78d06001135fbcd9f73a02" exitCode=0 Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.122742 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n" event={"ID":"3d2cdfd7-9190-4322-81d6-cf73f4815c8c","Type":"ContainerDied","Data":"bfaf8526044e1feb6345834ea24d8521f5977f572a78d06001135fbcd9f73a02"} Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.126388 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" 
start-of-body= Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.126423 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.126456 4938 generic.go:334] "Generic (PLEG): container finished" podID="4381753c-a419-4f63-acc0-6b1bc5529b75" containerID="26f798341fa2c7e7fdb60ec60633ac0e6c8921080247839e0e5536355463ce60" exitCode=0 Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.126533 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbjm" event={"ID":"4381753c-a419-4f63-acc0-6b1bc5529b75","Type":"ContainerDied","Data":"26f798341fa2c7e7fdb60ec60633ac0e6c8921080247839e0e5536355463ce60"} Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.126553 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbjm" event={"ID":"4381753c-a419-4f63-acc0-6b1bc5529b75","Type":"ContainerStarted","Data":"4983d09124f47004182739f4b828b721eead49f83c47d86c9e21a0e475e4bb53"} Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.126568 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.126626 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.129664 4938 generic.go:334] "Generic (PLEG): container finished" podID="08937d01-18fd-41a6-a44d-5cae36807fc7" containerID="64c2550968ea274048c722f5c64f5f9f455804798ad7445f8ecac0d4f4872a6d" exitCode=0 Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.129735 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gv775" event={"ID":"08937d01-18fd-41a6-a44d-5cae36807fc7","Type":"ContainerDied","Data":"64c2550968ea274048c722f5c64f5f9f455804798ad7445f8ecac0d4f4872a6d"} Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.129772 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gv775" event={"ID":"08937d01-18fd-41a6-a44d-5cae36807fc7","Type":"ContainerStarted","Data":"edacbdc9e6e635537e8c568c380a76341b5cc508316bc16275e83f898de7e1c4"} Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.205135 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.205194 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.211450 4938 patch_prober.go:28] interesting pod/console-f9d7485db-rl6xd container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.41:8443/health\": dial tcp 10.217.0.41:8443: connect: connection 
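[editor's note] The probe records in this stretch show the two failure modes a kubelet-style HTTP probe distinguishes: a TCP-level "connect: connection refused" (downloads, console) and a successful connection that returns HTTP 500, with a prefix of the response body kept as the start-of-body diagnostic (router). A simplified sketch of that check; probeHTTP is a hypothetical helper, and the real prober additionally sets headers and follows redirects. Success is any status in [200, 400).

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "time"
    )

    // probeHTTP issues a single HTTP GET the way a kubelet-style prober would:
    // any status in [200,400) passes, anything else fails, and a short prefix
    // of the body is kept for the "start-of-body" diagnostic seen in the log.
    func probeHTTP(url string, timeout time.Duration) (ok bool, code int, startOfBody string, err error) {
        client := &http.Client{Timeout: timeout}
        resp, err := client.Get(url)
        if err != nil {
            return false, 0, "", err // e.g. "connect: connection refused"
        }
        defer resp.Body.Close()
        prefix, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
        return resp.StatusCode >= 200 && resp.StatusCode < 400, resp.StatusCode, string(prefix), nil
    }

    func main() {
        ok, code, body, err := probeHTTP("http://10.217.0.22:8080/", time.Second)
        fmt.Println(ok, code, body, err)
    }

For the router, the 500 body is itself a healthz-style checklist ([-]backend-http, [-]has-synced, [+]process-running), which is why the log keeps those extra lines under start-of-body.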
refused" start-of-body= Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.211518 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-rl6xd" podUID="4461eec4-354b-417f-b8ae-24e3deed3a5a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.41:8443/health\": dial tcp 10.217.0.41:8443: connect: connection refused" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.249903 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pcrdf"] Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.250878 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pcrdf" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.253396 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.262438 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pcrdf"] Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.265715 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zr5wf"] Nov 22 10:40:33 crc kubenswrapper[4938]: W1122 10:40:33.282121 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7cf5d758_b959_49f9_8e98_6f84ef428081.slice/crio-bde089d98aa6605fcc48fc9a5e381dba24adf9050b4f026ab1f46cc50a973fd2 WatchSource:0}: Error finding container bde089d98aa6605fcc48fc9a5e381dba24adf9050b4f026ab1f46cc50a973fd2: Status 404 returned error can't find the container with id bde089d98aa6605fcc48fc9a5e381dba24adf9050b4f026ab1f46cc50a973fd2 Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.285599 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xf54z\" (UniqueName: \"kubernetes.io/projected/f5f69615-9ff1-4803-95de-28529afe284f-kube-api-access-xf54z\") pod \"redhat-marketplace-pcrdf\" (UID: \"f5f69615-9ff1-4803-95de-28529afe284f\") " pod="openshift-marketplace/redhat-marketplace-pcrdf" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.286470 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5f69615-9ff1-4803-95de-28529afe284f-catalog-content\") pod \"redhat-marketplace-pcrdf\" (UID: \"f5f69615-9ff1-4803-95de-28529afe284f\") " pod="openshift-marketplace/redhat-marketplace-pcrdf" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.286499 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5f69615-9ff1-4803-95de-28529afe284f-utilities\") pod \"redhat-marketplace-pcrdf\" (UID: \"f5f69615-9ff1-4803-95de-28529afe284f\") " pod="openshift-marketplace/redhat-marketplace-pcrdf" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.387899 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xf54z\" (UniqueName: \"kubernetes.io/projected/f5f69615-9ff1-4803-95de-28529afe284f-kube-api-access-xf54z\") pod \"redhat-marketplace-pcrdf\" (UID: \"f5f69615-9ff1-4803-95de-28529afe284f\") " pod="openshift-marketplace/redhat-marketplace-pcrdf" Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.388727 4938 
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.388755 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5f69615-9ff1-4803-95de-28529afe284f-utilities\") pod \"redhat-marketplace-pcrdf\" (UID: \"f5f69615-9ff1-4803-95de-28529afe284f\") " pod="openshift-marketplace/redhat-marketplace-pcrdf"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.389293 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5f69615-9ff1-4803-95de-28529afe284f-catalog-content\") pod \"redhat-marketplace-pcrdf\" (UID: \"f5f69615-9ff1-4803-95de-28529afe284f\") " pod="openshift-marketplace/redhat-marketplace-pcrdf"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.389742 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5f69615-9ff1-4803-95de-28529afe284f-utilities\") pod \"redhat-marketplace-pcrdf\" (UID: \"f5f69615-9ff1-4803-95de-28529afe284f\") " pod="openshift-marketplace/redhat-marketplace-pcrdf"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.405726 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xf54z\" (UniqueName: \"kubernetes.io/projected/f5f69615-9ff1-4803-95de-28529afe284f-kube-api-access-xf54z\") pod \"redhat-marketplace-pcrdf\" (UID: \"f5f69615-9ff1-4803-95de-28529afe284f\") " pod="openshift-marketplace/redhat-marketplace-pcrdf"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.586428 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pcrdf"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.653082 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-99zh8"]
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.654644 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.660573 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-99zh8"]
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.692685 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c013bd6c-811a-4807-97cc-d5023243b5e3-catalog-content\") pod \"redhat-marketplace-99zh8\" (UID: \"c013bd6c-811a-4807-97cc-d5023243b5e3\") " pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.692720 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vn6jf\" (UniqueName: \"kubernetes.io/projected/c013bd6c-811a-4807-97cc-d5023243b5e3-kube-api-access-vn6jf\") pod \"redhat-marketplace-99zh8\" (UID: \"c013bd6c-811a-4807-97cc-d5023243b5e3\") " pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.692754 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c013bd6c-811a-4807-97cc-d5023243b5e3-utilities\") pod \"redhat-marketplace-99zh8\" (UID: \"c013bd6c-811a-4807-97cc-d5023243b5e3\") " pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.787272 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pcrdf"]
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.793080 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 10:40:33 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld
Nov 22 10:40:33 crc kubenswrapper[4938]: [+]process-running ok
Nov 22 10:40:33 crc kubenswrapper[4938]: healthz check failed
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.793128 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.793946 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c013bd6c-811a-4807-97cc-d5023243b5e3-utilities\") pod \"redhat-marketplace-99zh8\" (UID: \"c013bd6c-811a-4807-97cc-d5023243b5e3\") " pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.794022 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c013bd6c-811a-4807-97cc-d5023243b5e3-catalog-content\") pod \"redhat-marketplace-99zh8\" (UID: \"c013bd6c-811a-4807-97cc-d5023243b5e3\") " pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.794039 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vn6jf\" (UniqueName: \"kubernetes.io/projected/c013bd6c-811a-4807-97cc-d5023243b5e3-kube-api-access-vn6jf\") pod \"redhat-marketplace-99zh8\" (UID: \"c013bd6c-811a-4807-97cc-d5023243b5e3\") " pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.794985 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c013bd6c-811a-4807-97cc-d5023243b5e3-utilities\") pod \"redhat-marketplace-99zh8\" (UID: \"c013bd6c-811a-4807-97cc-d5023243b5e3\") " pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.795224 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c013bd6c-811a-4807-97cc-d5023243b5e3-catalog-content\") pod \"redhat-marketplace-99zh8\" (UID: \"c013bd6c-811a-4807-97cc-d5023243b5e3\") " pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.829976 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vn6jf\" (UniqueName: \"kubernetes.io/projected/c013bd6c-811a-4807-97cc-d5023243b5e3-kube-api-access-vn6jf\") pod \"redhat-marketplace-99zh8\" (UID: \"c013bd6c-811a-4807-97cc-d5023243b5e3\") " pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.986871 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:40:33 crc kubenswrapper[4938]: I1122 10:40:33.996339 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.001582 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c-metrics-certs\") pod \"network-metrics-daemon-s7w5f\" (UID: \"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c\") " pod="openshift-multus/network-metrics-daemon-s7w5f"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.144132 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" event={"ID":"7cf5d758-b959-49f9-8e98-6f84ef428081","Type":"ContainerStarted","Data":"da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07"}
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.144482 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" event={"ID":"7cf5d758-b959-49f9-8e98-6f84ef428081","Type":"ContainerStarted","Data":"bde089d98aa6605fcc48fc9a5e381dba24adf9050b4f026ab1f46cc50a973fd2"}
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.145052 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.150937 4938 generic.go:334] "Generic (PLEG): container finished" podID="f5f69615-9ff1-4803-95de-28529afe284f" containerID="f48b4bcccd732bf0210ca67bea06de28e3fdc7dc5688094ea49c2326beb4c16d" exitCode=0
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.151592 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pcrdf" event={"ID":"f5f69615-9ff1-4803-95de-28529afe284f","Type":"ContainerDied","Data":"f48b4bcccd732bf0210ca67bea06de28e3fdc7dc5688094ea49c2326beb4c16d"}
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.151617 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pcrdf" event={"ID":"f5f69615-9ff1-4803-95de-28529afe284f","Type":"ContainerStarted","Data":"66cc4a05e7b5b824b574b882a8d03b7f3f982627c941aafc2af819eb8dc6845e"}
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.187951 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" podStartSLOduration=142.187927332 podStartE2EDuration="2m22.187927332s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:34.173753799 +0000 UTC m=+166.641591198" watchObservedRunningTime="2025-11-22 10:40:34.187927332 +0000 UTC m=+166.655764731"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.271052 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s7w5f"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.286360 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-99zh8"]
Nov 22 10:40:34 crc kubenswrapper[4938]: W1122 10:40:34.338272 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc013bd6c_811a_4807_97cc_d5023243b5e3.slice/crio-1c4498f7e650dfc2c18742be9df55754e68ccbea1c7af7e0d302d4cb2fac848d WatchSource:0}: Error finding container 1c4498f7e650dfc2c18742be9df55754e68ccbea1c7af7e0d302d4cb2fac848d: Status 404 returned error can't find the container with id 1c4498f7e650dfc2c18742be9df55754e68ccbea1c7af7e0d302d4cb2fac848d
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.479696 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.480737 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-l956q"]
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.482623 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l956q"]
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.482722 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.485238 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.509650 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z48k2\" (UniqueName: \"kubernetes.io/projected/80700ea3-9e71-45b9-8896-12c1ed5d4d00-kube-api-access-z48k2\") pod \"redhat-operators-l956q\" (UID: \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\") " pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.509683 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80700ea3-9e71-45b9-8896-12c1ed5d4d00-catalog-content\") pod \"redhat-operators-l956q\" (UID: \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\") " pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.509704 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80700ea3-9e71-45b9-8896-12c1ed5d4d00-utilities\") pod \"redhat-operators-l956q\" (UID: \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\") " pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.519837 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.612256 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z48k2\" (UniqueName: \"kubernetes.io/projected/80700ea3-9e71-45b9-8896-12c1ed5d4d00-kube-api-access-z48k2\") pod \"redhat-operators-l956q\" (UID: \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\") " pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.612398 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80700ea3-9e71-45b9-8896-12c1ed5d4d00-catalog-content\") pod \"redhat-operators-l956q\" (UID: \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\") " pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.612440 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80700ea3-9e71-45b9-8896-12c1ed5d4d00-utilities\") pod \"redhat-operators-l956q\" (UID: \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\") " pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.613322 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80700ea3-9e71-45b9-8896-12c1ed5d4d00-utilities\") pod \"redhat-operators-l956q\" (UID: \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\") " pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.613716 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80700ea3-9e71-45b9-8896-12c1ed5d4d00-catalog-content\") pod \"redhat-operators-l956q\" (UID: \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\") " pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.623595 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.637129 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z48k2\" (UniqueName: \"kubernetes.io/projected/80700ea3-9e71-45b9-8896-12c1ed5d4d00-kube-api-access-z48k2\") pod \"redhat-operators-l956q\" (UID: \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\") " pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.713697 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-secret-volume\") pod \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\" (UID: \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\") "
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.713768 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spwl6\" (UniqueName: \"kubernetes.io/projected/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-kube-api-access-spwl6\") pod \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\" (UID: \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\") "
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.713895 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-config-volume\") pod \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\" (UID: \"3d2cdfd7-9190-4322-81d6-cf73f4815c8c\") "
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.714788 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-config-volume" (OuterVolumeSpecName: "config-volume") pod "3d2cdfd7-9190-4322-81d6-cf73f4815c8c" (UID: "3d2cdfd7-9190-4322-81d6-cf73f4815c8c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.718642 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-kube-api-access-spwl6" (OuterVolumeSpecName: "kube-api-access-spwl6") pod "3d2cdfd7-9190-4322-81d6-cf73f4815c8c" (UID: "3d2cdfd7-9190-4322-81d6-cf73f4815c8c"). InnerVolumeSpecName "kube-api-access-spwl6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.718687 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3d2cdfd7-9190-4322-81d6-cf73f4815c8c" (UID: "3d2cdfd7-9190-4322-81d6-cf73f4815c8c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.734944 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.746821 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-fk4l7"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.750417 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.782286 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-mgpzv"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.782690 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.787391 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 10:40:34 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld
Nov 22 10:40:34 crc kubenswrapper[4938]: [+]process-running ok
Nov 22 10:40:34 crc kubenswrapper[4938]: healthz check failed
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.787433 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.802448 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.824017 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-s7w5f"]
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.829384 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-999ph"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.831269 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e2545656-f2d7-49a3-bcdf-52cd837b2a25-kube-api-access\") pod \"e2545656-f2d7-49a3-bcdf-52cd837b2a25\" (UID: \"e2545656-f2d7-49a3-bcdf-52cd837b2a25\") "
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.831486 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e2545656-f2d7-49a3-bcdf-52cd837b2a25-kubelet-dir\") pod \"e2545656-f2d7-49a3-bcdf-52cd837b2a25\" (UID: \"e2545656-f2d7-49a3-bcdf-52cd837b2a25\") "
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.832263 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spwl6\" (UniqueName: \"kubernetes.io/projected/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-kube-api-access-spwl6\") on node \"crc\" DevicePath \"\""
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.832283 4938 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-config-volume\") on node \"crc\" DevicePath \"\""
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.832297 4938 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3d2cdfd7-9190-4322-81d6-cf73f4815c8c-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.838599 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e2545656-f2d7-49a3-bcdf-52cd837b2a25-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e2545656-f2d7-49a3-bcdf-52cd837b2a25" (UID: "e2545656-f2d7-49a3-bcdf-52cd837b2a25"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.857351 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2545656-f2d7-49a3-bcdf-52cd837b2a25-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e2545656-f2d7-49a3-bcdf-52cd837b2a25" (UID: "e2545656-f2d7-49a3-bcdf-52cd837b2a25"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.858714 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ggzxq"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.864265 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hwr5f"]
Nov 22 10:40:34 crc kubenswrapper[4938]: E1122 10:40:34.864480 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d2cdfd7-9190-4322-81d6-cf73f4815c8c" containerName="collect-profiles"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.864491 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d2cdfd7-9190-4322-81d6-cf73f4815c8c" containerName="collect-profiles"
Nov 22 10:40:34 crc kubenswrapper[4938]: E1122 10:40:34.864509 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2545656-f2d7-49a3-bcdf-52cd837b2a25" containerName="pruner"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.864514 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2545656-f2d7-49a3-bcdf-52cd837b2a25" containerName="pruner"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.864610 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2545656-f2d7-49a3-bcdf-52cd837b2a25" containerName="pruner"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.864620 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d2cdfd7-9190-4322-81d6-cf73f4815c8c" containerName="collect-profiles"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.865428 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.882600 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hwr5f"]
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.938571 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2a3c1c-2ab4-48b9-991b-38847c71f996-utilities\") pod \"redhat-operators-hwr5f\" (UID: \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\") " pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.938670 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lt92q\" (UniqueName: \"kubernetes.io/projected/ed2a3c1c-2ab4-48b9-991b-38847c71f996-kube-api-access-lt92q\") pod \"redhat-operators-hwr5f\" (UID: \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\") " pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.938703 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2a3c1c-2ab4-48b9-991b-38847c71f996-catalog-content\") pod \"redhat-operators-hwr5f\" (UID: \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\") " pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.938764 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e2545656-f2d7-49a3-bcdf-52cd837b2a25-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 22 10:40:34 crc kubenswrapper[4938]: I1122 10:40:34.938778 4938 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e2545656-f2d7-49a3-bcdf-52cd837b2a25-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.043711 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lt92q\" (UniqueName: \"kubernetes.io/projected/ed2a3c1c-2ab4-48b9-991b-38847c71f996-kube-api-access-lt92q\") pod \"redhat-operators-hwr5f\" (UID: \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\") " pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.043778 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2a3c1c-2ab4-48b9-991b-38847c71f996-catalog-content\") pod \"redhat-operators-hwr5f\" (UID: \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\") " pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.043848 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2a3c1c-2ab4-48b9-991b-38847c71f996-utilities\") pod \"redhat-operators-hwr5f\" (UID: \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\") " pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.044385 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2a3c1c-2ab4-48b9-991b-38847c71f996-utilities\") pod \"redhat-operators-hwr5f\" (UID: \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\") " pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.045670 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2a3c1c-2ab4-48b9-991b-38847c71f996-catalog-content\") pod \"redhat-operators-hwr5f\" (UID: \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\") " pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.081156 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lt92q\" (UniqueName: \"kubernetes.io/projected/ed2a3c1c-2ab4-48b9-991b-38847c71f996-kube-api-access-lt92q\") pod \"redhat-operators-hwr5f\" (UID: \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\") " pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.162976 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n" event={"ID":"3d2cdfd7-9190-4322-81d6-cf73f4815c8c","Type":"ContainerDied","Data":"c4c387ac52afcee0a5a76bfcd2e40c0a221140c5691ed1e23156ba1ca23c4d87"}
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.163002 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n"
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.163023 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c4c387ac52afcee0a5a76bfcd2e40c0a221140c5691ed1e23156ba1ca23c4d87"
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.166084 4938 generic.go:334] "Generic (PLEG): container finished" podID="c013bd6c-811a-4807-97cc-d5023243b5e3" containerID="19dbfa8d1e471769ffd1c396f19d1ac5256e3a6ce285ac600c64d8a1c5c229de" exitCode=0
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.166768 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99zh8" event={"ID":"c013bd6c-811a-4807-97cc-d5023243b5e3","Type":"ContainerDied","Data":"19dbfa8d1e471769ffd1c396f19d1ac5256e3a6ce285ac600c64d8a1c5c229de"}
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.166804 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99zh8" event={"ID":"c013bd6c-811a-4807-97cc-d5023243b5e3","Type":"ContainerStarted","Data":"1c4498f7e650dfc2c18742be9df55754e68ccbea1c7af7e0d302d4cb2fac848d"}
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.174454 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" event={"ID":"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c","Type":"ContainerStarted","Data":"fbac9ceeb167b13ea3ea38c0d1d4712665ccba510d3357dce032cc151b3c96bf"}
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.178029 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.179602 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"e2545656-f2d7-49a3-bcdf-52cd837b2a25","Type":"ContainerDied","Data":"d5213a6f6737c486bda9a672ee8cf2c5b0838e97b1dfcad35301449031e4bc87"}
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.179646 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d5213a6f6737c486bda9a672ee8cf2c5b0838e97b1dfcad35301449031e4bc87"
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.275673 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.505670 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l956q"]
Nov 22 10:40:35 crc kubenswrapper[4938]: W1122 10:40:35.554797 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80700ea3_9e71_45b9_8896_12c1ed5d4d00.slice/crio-a85692ab917b8f5bc5b350e4c29866dad38a861c914a5e05b64fb5f4931c1d9b WatchSource:0}: Error finding container a85692ab917b8f5bc5b350e4c29866dad38a861c914a5e05b64fb5f4931c1d9b: Status 404 returned error can't find the container with id a85692ab917b8f5bc5b350e4c29866dad38a861c914a5e05b64fb5f4931c1d9b
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.578084 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hwr5f"]
Nov 22 10:40:35 crc kubenswrapper[4938]: W1122 10:40:35.589364 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded2a3c1c_2ab4_48b9_991b_38847c71f996.slice/crio-44404f02587bc4103fcd191f9d1c6725ee343be996355c69737cc6b6fd6013af WatchSource:0}: Error finding container 44404f02587bc4103fcd191f9d1c6725ee343be996355c69737cc6b6fd6013af: Status 404 returned error can't find the container with id 44404f02587bc4103fcd191f9d1c6725ee343be996355c69737cc6b6fd6013af
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.804323 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 10:40:35 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld
Nov 22 10:40:35 crc kubenswrapper[4938]: [+]process-running ok
Nov 22 10:40:35 crc kubenswrapper[4938]: healthz check failed
Nov 22 10:40:35 crc kubenswrapper[4938]: I1122 10:40:35.804390 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.200232 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hwr5f" event={"ID":"ed2a3c1c-2ab4-48b9-991b-38847c71f996","Type":"ContainerStarted","Data":"44404f02587bc4103fcd191f9d1c6725ee343be996355c69737cc6b6fd6013af"}
Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.202021 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l956q" event={"ID":"80700ea3-9e71-45b9-8896-12c1ed5d4d00","Type":"ContainerStarted","Data":"a85692ab917b8f5bc5b350e4c29866dad38a861c914a5e05b64fb5f4931c1d9b"}
Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.757092 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.757747 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.760084 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.760582 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.767614 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.791572 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:40:36 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld Nov 22 10:40:36 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:36 crc kubenswrapper[4938]: healthz check failed Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.791649 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.879848 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a63f51d-82cc-4c0d-b847-9c3c93ab242c-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3a63f51d-82cc-4c0d-b847-9c3c93ab242c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.880204 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a63f51d-82cc-4c0d-b847-9c3c93ab242c-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3a63f51d-82cc-4c0d-b847-9c3c93ab242c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.981106 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a63f51d-82cc-4c0d-b847-9c3c93ab242c-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3a63f51d-82cc-4c0d-b847-9c3c93ab242c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.981177 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a63f51d-82cc-4c0d-b847-9c3c93ab242c-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3a63f51d-82cc-4c0d-b847-9c3c93ab242c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:40:36 crc kubenswrapper[4938]: I1122 10:40:36.981254 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a63f51d-82cc-4c0d-b847-9c3c93ab242c-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3a63f51d-82cc-4c0d-b847-9c3c93ab242c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:40:37 crc kubenswrapper[4938]: I1122 10:40:37.000646 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a63f51d-82cc-4c0d-b847-9c3c93ab242c-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3a63f51d-82cc-4c0d-b847-9c3c93ab242c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:40:37 crc kubenswrapper[4938]: I1122 10:40:37.102177 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:40:37 crc kubenswrapper[4938]: I1122 10:40:37.638135 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 22 10:40:37 crc kubenswrapper[4938]: I1122 10:40:37.784744 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:40:37 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld Nov 22 10:40:37 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:37 crc kubenswrapper[4938]: healthz check failed Nov 22 10:40:37 crc kubenswrapper[4938]: I1122 10:40:37.785048 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:40:38 crc kubenswrapper[4938]: I1122 10:40:38.254723 4938 generic.go:334] "Generic (PLEG): container finished" podID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" containerID="b951df96b39f9185d9508d2dc749b04526b01e508cbf8dfc2d3728cfd352e9e3" exitCode=0 Nov 22 10:40:38 crc kubenswrapper[4938]: I1122 10:40:38.254797 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hwr5f" event={"ID":"ed2a3c1c-2ab4-48b9-991b-38847c71f996","Type":"ContainerDied","Data":"b951df96b39f9185d9508d2dc749b04526b01e508cbf8dfc2d3728cfd352e9e3"} Nov 22 10:40:38 crc kubenswrapper[4938]: I1122 10:40:38.261236 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" event={"ID":"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c","Type":"ContainerStarted","Data":"41a9a7197cd6aaae4abb44d3fa58daf067c0160288f74d343c5a3121bc713342"} Nov 22 10:40:38 crc kubenswrapper[4938]: I1122 10:40:38.266902 4938 generic.go:334] "Generic (PLEG): container finished" podID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" containerID="57977bc8313c73133e1362be24eb0d04f996e2d701c9dcc39e1572e563a84de2" exitCode=0 Nov 22 10:40:38 crc kubenswrapper[4938]: I1122 10:40:38.267075 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l956q" event={"ID":"80700ea3-9e71-45b9-8896-12c1ed5d4d00","Type":"ContainerDied","Data":"57977bc8313c73133e1362be24eb0d04f996e2d701c9dcc39e1572e563a84de2"} Nov 22 10:40:38 crc kubenswrapper[4938]: I1122 10:40:38.269471 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3a63f51d-82cc-4c0d-b847-9c3c93ab242c","Type":"ContainerStarted","Data":"9c61fe50407932f5a996384de9cf01501255b9b144aac2407e55aad72917ffb5"} Nov 22 10:40:38 crc kubenswrapper[4938]: I1122 10:40:38.783947 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 
10:40:38 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld Nov 22 10:40:38 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:38 crc kubenswrapper[4938]: healthz check failed Nov 22 10:40:38 crc kubenswrapper[4938]: I1122 10:40:38.784008 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:40:39 crc kubenswrapper[4938]: I1122 10:40:39.782999 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:40:39 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld Nov 22 10:40:39 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:39 crc kubenswrapper[4938]: healthz check failed Nov 22 10:40:39 crc kubenswrapper[4938]: I1122 10:40:39.783290 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:40:39 crc kubenswrapper[4938]: I1122 10:40:39.881135 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-dd667" Nov 22 10:40:40 crc kubenswrapper[4938]: I1122 10:40:40.791600 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:40:40 crc kubenswrapper[4938]: [-]has-synced failed: reason withheld Nov 22 10:40:40 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:40 crc kubenswrapper[4938]: healthz check failed Nov 22 10:40:40 crc kubenswrapper[4938]: I1122 10:40:40.791666 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:40:41 crc kubenswrapper[4938]: I1122 10:40:41.300874 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:40:41 crc kubenswrapper[4938]: I1122 10:40:41.300970 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:40:41 crc kubenswrapper[4938]: I1122 10:40:41.787447 4938 patch_prober.go:28] interesting pod/router-default-5444994796-mgpzv container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:40:41 crc kubenswrapper[4938]: [+]has-synced ok Nov 22 10:40:41 crc kubenswrapper[4938]: [+]process-running ok Nov 22 10:40:41 crc 
kubenswrapper[4938]: healthz check failed Nov 22 10:40:41 crc kubenswrapper[4938]: I1122 10:40:41.787510 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mgpzv" podUID="6e794058-99a7-45d0-ba53-8a6d3b1c7d1b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:40:42 crc kubenswrapper[4938]: I1122 10:40:42.318396 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3a63f51d-82cc-4c0d-b847-9c3c93ab242c","Type":"ContainerStarted","Data":"5e4695bfdd8194fbed44be3c3ee09a549e1533785c1a48abe8458fcc2c7e2179"} Nov 22 10:40:42 crc kubenswrapper[4938]: I1122 10:40:42.783848 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:42 crc kubenswrapper[4938]: I1122 10:40:42.788072 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-mgpzv" Nov 22 10:40:43 crc kubenswrapper[4938]: I1122 10:40:43.127359 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:40:43 crc kubenswrapper[4938]: I1122 10:40:43.127377 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:40:43 crc kubenswrapper[4938]: I1122 10:40:43.127417 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:40:43 crc kubenswrapper[4938]: I1122 10:40:43.127432 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:40:43 crc kubenswrapper[4938]: I1122 10:40:43.205267 4938 patch_prober.go:28] interesting pod/console-f9d7485db-rl6xd container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.41:8443/health\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body= Nov 22 10:40:43 crc kubenswrapper[4938]: I1122 10:40:43.205322 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-rl6xd" podUID="4461eec4-354b-417f-b8ae-24e3deed3a5a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.41:8443/health\": dial tcp 10.217.0.41:8443: connect: connection refused" Nov 22 10:40:43 crc kubenswrapper[4938]: I1122 10:40:43.330611 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-s7w5f" event={"ID":"7d7f16a7-bfe2-42d2-ab1d-8fdf9481873c","Type":"ContainerStarted","Data":"9a4c057786033b734f04611c206c0861c6d1ad1972b5ae576a953089fe5cfe5c"} Nov 22 10:40:45 crc kubenswrapper[4938]: I1122 10:40:45.341420 4938 generic.go:334] 
"Generic (PLEG): container finished" podID="3a63f51d-82cc-4c0d-b847-9c3c93ab242c" containerID="5e4695bfdd8194fbed44be3c3ee09a549e1533785c1a48abe8458fcc2c7e2179" exitCode=0 Nov 22 10:40:45 crc kubenswrapper[4938]: I1122 10:40:45.341546 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3a63f51d-82cc-4c0d-b847-9c3c93ab242c","Type":"ContainerDied","Data":"5e4695bfdd8194fbed44be3c3ee09a549e1533785c1a48abe8458fcc2c7e2179"} Nov 22 10:40:46 crc kubenswrapper[4938]: I1122 10:40:46.368868 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-s7w5f" podStartSLOduration=154.368777316 podStartE2EDuration="2m34.368777316s" podCreationTimestamp="2025-11-22 10:38:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:40:46.363245581 +0000 UTC m=+178.831082970" watchObservedRunningTime="2025-11-22 10:40:46.368777316 +0000 UTC m=+178.836614715" Nov 22 10:40:49 crc kubenswrapper[4938]: I1122 10:40:49.824956 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:40:49 crc kubenswrapper[4938]: I1122 10:40:49.894805 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a63f51d-82cc-4c0d-b847-9c3c93ab242c-kube-api-access\") pod \"3a63f51d-82cc-4c0d-b847-9c3c93ab242c\" (UID: \"3a63f51d-82cc-4c0d-b847-9c3c93ab242c\") " Nov 22 10:40:49 crc kubenswrapper[4938]: I1122 10:40:49.895036 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a63f51d-82cc-4c0d-b847-9c3c93ab242c-kubelet-dir\") pod \"3a63f51d-82cc-4c0d-b847-9c3c93ab242c\" (UID: \"3a63f51d-82cc-4c0d-b847-9c3c93ab242c\") " Nov 22 10:40:49 crc kubenswrapper[4938]: I1122 10:40:49.895113 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a63f51d-82cc-4c0d-b847-9c3c93ab242c-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3a63f51d-82cc-4c0d-b847-9c3c93ab242c" (UID: "3a63f51d-82cc-4c0d-b847-9c3c93ab242c"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:40:49 crc kubenswrapper[4938]: I1122 10:40:49.895339 4938 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a63f51d-82cc-4c0d-b847-9c3c93ab242c-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:49 crc kubenswrapper[4938]: I1122 10:40:49.901204 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a63f51d-82cc-4c0d-b847-9c3c93ab242c-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3a63f51d-82cc-4c0d-b847-9c3c93ab242c" (UID: "3a63f51d-82cc-4c0d-b847-9c3c93ab242c"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:49 crc kubenswrapper[4938]: I1122 10:40:49.995963 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a63f51d-82cc-4c0d-b847-9c3c93ab242c-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:50 crc kubenswrapper[4938]: I1122 10:40:50.369826 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3a63f51d-82cc-4c0d-b847-9c3c93ab242c","Type":"ContainerDied","Data":"9c61fe50407932f5a996384de9cf01501255b9b144aac2407e55aad72917ffb5"} Nov 22 10:40:50 crc kubenswrapper[4938]: I1122 10:40:50.370118 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c61fe50407932f5a996384de9cf01501255b9b144aac2407e55aad72917ffb5" Nov 22 10:40:50 crc kubenswrapper[4938]: I1122 10:40:50.369869 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:40:53 crc kubenswrapper[4938]: I1122 10:40:53.057557 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:40:53 crc kubenswrapper[4938]: I1122 10:40:53.127164 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:40:53 crc kubenswrapper[4938]: I1122 10:40:53.127184 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:40:53 crc kubenswrapper[4938]: I1122 10:40:53.127224 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:40:53 crc kubenswrapper[4938]: I1122 10:40:53.127236 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:40:53 crc kubenswrapper[4938]: I1122 10:40:53.127272 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-k5zr9" Nov 22 10:40:53 crc kubenswrapper[4938]: I1122 10:40:53.127817 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"295371d8f00675995a5af71b0f020386c3e30faa48f12ed077a40c5d2dfca6aa"} pod="openshift-console/downloads-7954f5f757-k5zr9" containerMessage="Container download-server failed liveness probe, will be restarted" Nov 22 10:40:53 crc kubenswrapper[4938]: I1122 10:40:53.127950 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" 
containerID="cri-o://295371d8f00675995a5af71b0f020386c3e30faa48f12ed077a40c5d2dfca6aa" gracePeriod=2 Nov 22 10:40:53 crc kubenswrapper[4938]: I1122 10:40:53.128680 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:40:53 crc kubenswrapper[4938]: I1122 10:40:53.128738 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:40:53 crc kubenswrapper[4938]: I1122 10:40:53.209132 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:53 crc kubenswrapper[4938]: I1122 10:40:53.212546 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:40:54 crc kubenswrapper[4938]: I1122 10:40:54.389627 4938 generic.go:334] "Generic (PLEG): container finished" podID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerID="295371d8f00675995a5af71b0f020386c3e30faa48f12ed077a40c5d2dfca6aa" exitCode=0 Nov 22 10:40:54 crc kubenswrapper[4938]: I1122 10:40:54.389789 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-k5zr9" event={"ID":"9a78982d-f026-44c1-a2d2-ec9caa99331c","Type":"ContainerDied","Data":"295371d8f00675995a5af71b0f020386c3e30faa48f12ed077a40c5d2dfca6aa"} Nov 22 10:40:59 crc kubenswrapper[4938]: I1122 10:40:59.580027 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:03 crc kubenswrapper[4938]: I1122 10:41:03.127948 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:41:03 crc kubenswrapper[4938]: I1122 10:41:03.128295 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:41:03 crc kubenswrapper[4938]: E1122 10:41:03.419140 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 22 10:41:03 crc kubenswrapper[4938]: E1122 10:41:03.419306 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vn6jf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-99zh8_openshift-marketplace(c013bd6c-811a-4807-97cc-d5023243b5e3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:41:03 crc kubenswrapper[4938]: E1122 10:41:03.420769 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-99zh8" podUID="c013bd6c-811a-4807-97cc-d5023243b5e3" Nov 22 10:41:04 crc kubenswrapper[4938]: I1122 10:41:04.766010 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4t6l6" Nov 22 10:41:11 crc kubenswrapper[4938]: I1122 10:41:11.301125 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:41:11 crc kubenswrapper[4938]: I1122 10:41:11.301410 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:41:13 crc kubenswrapper[4938]: I1122 10:41:13.127766 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:41:13 crc kubenswrapper[4938]: I1122 10:41:13.127826 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get 
\"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:41:22 crc kubenswrapper[4938]: E1122 10:41:22.792154 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 22 10:41:22 crc kubenswrapper[4938]: E1122 10:41:22.792815 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xf54z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-pcrdf_openshift-marketplace(f5f69615-9ff1-4803-95de-28529afe284f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:41:22 crc kubenswrapper[4938]: E1122 10:41:22.794154 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-pcrdf" podUID="f5f69615-9ff1-4803-95de-28529afe284f" Nov 22 10:41:22 crc kubenswrapper[4938]: E1122 10:41:22.795976 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 22 10:41:22 crc kubenswrapper[4938]: E1122 10:41:22.796187 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-97p8p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-tqbjm_openshift-marketplace(4381753c-a419-4f63-acc0-6b1bc5529b75): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:41:22 crc kubenswrapper[4938]: E1122 10:41:22.797689 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-tqbjm" podUID="4381753c-a419-4f63-acc0-6b1bc5529b75" Nov 22 10:41:23 crc kubenswrapper[4938]: I1122 10:41:23.127518 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:41:23 crc kubenswrapper[4938]: I1122 10:41:23.127606 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:41:33 crc kubenswrapper[4938]: I1122 10:41:33.127357 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:41:33 crc kubenswrapper[4938]: I1122 10:41:33.128891 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:41:35 crc kubenswrapper[4938]: E1122 10:41:35.221990 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from 
manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 22 10:41:35 crc kubenswrapper[4938]: E1122 10:41:35.222567 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d2s2z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-gv775_openshift-marketplace(08937d01-18fd-41a6-a44d-5cae36807fc7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:41:35 crc kubenswrapper[4938]: E1122 10:41:35.223804 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-gv775" podUID="08937d01-18fd-41a6-a44d-5cae36807fc7" Nov 22 10:41:35 crc kubenswrapper[4938]: E1122 10:41:35.900858 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 22 10:41:35 crc kubenswrapper[4938]: E1122 10:41:35.902561 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kbwdm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-dh2qc_openshift-marketplace(cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:41:35 crc kubenswrapper[4938]: E1122 10:41:35.903791 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-dh2qc" podUID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" Nov 22 10:41:41 crc kubenswrapper[4938]: I1122 10:41:41.301139 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:41:41 crc kubenswrapper[4938]: I1122 10:41:41.301491 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:41:41 crc kubenswrapper[4938]: I1122 10:41:41.301548 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:41:41 crc kubenswrapper[4938]: I1122 10:41:41.302190 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 10:41:41 crc kubenswrapper[4938]: I1122 10:41:41.302244 4938 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f" gracePeriod=600 Nov 22 10:41:43 crc kubenswrapper[4938]: I1122 10:41:43.127121 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:41:43 crc kubenswrapper[4938]: I1122 10:41:43.127207 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:41:45 crc kubenswrapper[4938]: E1122 10:41:45.492292 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 22 10:41:45 crc kubenswrapper[4938]: E1122 10:41:45.492897 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kdd9b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-jcz58_openshift-marketplace(372bd14a-9e2d-4320-92e4-534c44542975): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:41:45 crc kubenswrapper[4938]: E1122 10:41:45.494431 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-jcz58" 
podUID="372bd14a-9e2d-4320-92e4-534c44542975" Nov 22 10:41:48 crc kubenswrapper[4938]: I1122 10:41:48.653583 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f" exitCode=0 Nov 22 10:41:48 crc kubenswrapper[4938]: I1122 10:41:48.653673 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f"} Nov 22 10:41:51 crc kubenswrapper[4938]: E1122 10:41:51.260330 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-jcz58" podUID="372bd14a-9e2d-4320-92e4-534c44542975" Nov 22 10:41:51 crc kubenswrapper[4938]: E1122 10:41:51.281058 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 22 10:41:51 crc kubenswrapper[4938]: E1122 10:41:51.281244 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lt92q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-hwr5f_openshift-marketplace(ed2a3c1c-2ab4-48b9-991b-38847c71f996): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:41:51 crc kubenswrapper[4938]: E1122 10:41:51.282556 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" 
pod="openshift-marketplace/redhat-operators-hwr5f" podUID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" Nov 22 10:41:51 crc kubenswrapper[4938]: E1122 10:41:51.291440 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 22 10:41:51 crc kubenswrapper[4938]: E1122 10:41:51.291676 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z48k2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-l956q_openshift-marketplace(80700ea3-9e71-45b9-8896-12c1ed5d4d00): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:41:51 crc kubenswrapper[4938]: E1122 10:41:51.292973 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-l956q" podUID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" Nov 22 10:41:53 crc kubenswrapper[4938]: I1122 10:41:53.127399 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:41:53 crc kubenswrapper[4938]: I1122 10:41:53.127780 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" Nov 22 10:42:01 crc kubenswrapper[4938]: E1122 10:42:01.097612 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-hwr5f" podUID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" Nov 22 10:42:01 crc kubenswrapper[4938]: E1122 10:42:01.099524 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 22 10:42:01 crc kubenswrapper[4938]: E1122 10:42:01.099705 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vn6jf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-99zh8_openshift-marketplace(c013bd6c-811a-4807-97cc-d5023243b5e3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:42:01 crc kubenswrapper[4938]: E1122 10:42:01.100939 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-99zh8" podUID="c013bd6c-811a-4807-97cc-d5023243b5e3" Nov 22 10:42:03 crc kubenswrapper[4938]: I1122 10:42:03.127613 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Nov 22 10:42:03 crc kubenswrapper[4938]: I1122 10:42:03.127705 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: 
Nov 22 10:42:08 crc kubenswrapper[4938]: E1122 10:42:08.688129 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-99zh8" podUID="c013bd6c-811a-4807-97cc-d5023243b5e3"
Nov 22 10:42:08 crc kubenswrapper[4938]: I1122 10:42:08.782324 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-k5zr9" event={"ID":"9a78982d-f026-44c1-a2d2-ec9caa99331c","Type":"ContainerStarted","Data":"2fa76eda60db3e1158f1792b41daea8d0565a62aecbc6b11d8b53f266623339e"}
Nov 22 10:42:08 crc kubenswrapper[4938]: I1122 10:42:08.782665 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-k5zr9"
Nov 22 10:42:08 crc kubenswrapper[4938]: I1122 10:42:08.782984 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body=
Nov 22 10:42:08 crc kubenswrapper[4938]: I1122 10:42:08.783020 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused"
Nov 22 10:42:09 crc kubenswrapper[4938]: I1122 10:42:09.797657 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body=
Nov 22 10:42:09 crc kubenswrapper[4938]: I1122 10:42:09.797738 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused"
Nov 22 10:42:11 crc kubenswrapper[4938]: I1122 10:42:11.808715 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"630364ea11dce6112bc5c154dd1935ce1f62e93a130b99402687bd95f412446b"}
Nov 22 10:42:12 crc kubenswrapper[4938]: I1122 10:42:12.814775 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dh2qc" event={"ID":"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db","Type":"ContainerStarted","Data":"cd9e69e40cda7764e45783b246ad6c9667754a5c7c9b134b9f0c720f55207073"}
Nov 22 10:42:12 crc kubenswrapper[4938]: I1122 10:42:12.816566 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l956q" event={"ID":"80700ea3-9e71-45b9-8896-12c1ed5d4d00","Type":"ContainerStarted","Data":"1eb2f59a2a71aada26c8a3db3e70803fe5242f58fcf4eaa9402a230bd402cac5"}
Nov 22 10:42:12 crc kubenswrapper[4938]: I1122 10:42:12.818054 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pcrdf" event={"ID":"f5f69615-9ff1-4803-95de-28529afe284f","Type":"ContainerStarted","Data":"a7c2afdc77ce71eccce835986d6979664cdf02ab602dbc9b97c0d74733bdb7fa"}
Nov 22 10:42:12 crc kubenswrapper[4938]: I1122 10:42:12.819416 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcz58" event={"ID":"372bd14a-9e2d-4320-92e4-534c44542975","Type":"ContainerStarted","Data":"54ce364fdd29b1732ece8015ae95a6449609df7d447921356511884a6c788af9"}
Nov 22 10:42:12 crc kubenswrapper[4938]: I1122 10:42:12.821206 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbjm" event={"ID":"4381753c-a419-4f63-acc0-6b1bc5529b75","Type":"ContainerStarted","Data":"40a542fc90f39c459b327a5e3ad4e995b73b7c86578d7cefc90a66f97428f5e8"}
Nov 22 10:42:12 crc kubenswrapper[4938]: I1122 10:42:12.822847 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gv775" event={"ID":"08937d01-18fd-41a6-a44d-5cae36807fc7","Type":"ContainerStarted","Data":"1225ec6496589b789bc797d66f6f91a86725f75d6aa81c9f699525992bc45120"}
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.127002 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body=
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.127060 4938 patch_prober.go:28] interesting pod/downloads-7954f5f757-k5zr9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body=
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.127066 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused"
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.127104 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-k5zr9" podUID="9a78982d-f026-44c1-a2d2-ec9caa99331c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.22:8080/\": dial tcp 10.217.0.22:8080: connect: connection refused"
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.830120 4938 generic.go:334] "Generic (PLEG): container finished" podID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" containerID="1eb2f59a2a71aada26c8a3db3e70803fe5242f58fcf4eaa9402a230bd402cac5" exitCode=0
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.830714 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l956q" event={"ID":"80700ea3-9e71-45b9-8896-12c1ed5d4d00","Type":"ContainerDied","Data":"1eb2f59a2a71aada26c8a3db3e70803fe5242f58fcf4eaa9402a230bd402cac5"}
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.833691 4938 generic.go:334] "Generic (PLEG): container finished" podID="f5f69615-9ff1-4803-95de-28529afe284f" containerID="a7c2afdc77ce71eccce835986d6979664cdf02ab602dbc9b97c0d74733bdb7fa" exitCode=0
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.833751 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pcrdf" event={"ID":"f5f69615-9ff1-4803-95de-28529afe284f","Type":"ContainerDied","Data":"a7c2afdc77ce71eccce835986d6979664cdf02ab602dbc9b97c0d74733bdb7fa"}
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.837003 4938 generic.go:334] "Generic (PLEG): container finished" podID="372bd14a-9e2d-4320-92e4-534c44542975" containerID="54ce364fdd29b1732ece8015ae95a6449609df7d447921356511884a6c788af9" exitCode=0
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.837055 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcz58" event={"ID":"372bd14a-9e2d-4320-92e4-534c44542975","Type":"ContainerDied","Data":"54ce364fdd29b1732ece8015ae95a6449609df7d447921356511884a6c788af9"}
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.838697 4938 generic.go:334] "Generic (PLEG): container finished" podID="4381753c-a419-4f63-acc0-6b1bc5529b75" containerID="40a542fc90f39c459b327a5e3ad4e995b73b7c86578d7cefc90a66f97428f5e8" exitCode=0
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.838765 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbjm" event={"ID":"4381753c-a419-4f63-acc0-6b1bc5529b75","Type":"ContainerDied","Data":"40a542fc90f39c459b327a5e3ad4e995b73b7c86578d7cefc90a66f97428f5e8"}
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.840793 4938 generic.go:334] "Generic (PLEG): container finished" podID="08937d01-18fd-41a6-a44d-5cae36807fc7" containerID="1225ec6496589b789bc797d66f6f91a86725f75d6aa81c9f699525992bc45120" exitCode=0
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.840856 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gv775" event={"ID":"08937d01-18fd-41a6-a44d-5cae36807fc7","Type":"ContainerDied","Data":"1225ec6496589b789bc797d66f6f91a86725f75d6aa81c9f699525992bc45120"}
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.843175 4938 generic.go:334] "Generic (PLEG): container finished" podID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" containerID="cd9e69e40cda7764e45783b246ad6c9667754a5c7c9b134b9f0c720f55207073" exitCode=0
Nov 22 10:42:13 crc kubenswrapper[4938]: I1122 10:42:13.843226 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dh2qc" event={"ID":"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db","Type":"ContainerDied","Data":"cd9e69e40cda7764e45783b246ad6c9667754a5c7c9b134b9f0c720f55207073"}
Nov 22 10:42:23 crc kubenswrapper[4938]: I1122 10:42:23.142126 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-k5zr9"
Nov 22 10:42:53 crc kubenswrapper[4938]: I1122 10:42:53.066774 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gv775" event={"ID":"08937d01-18fd-41a6-a44d-5cae36807fc7","Type":"ContainerStarted","Data":"6c4d8104f4cf0287447b1bde313ddb268d84af761ddac939f2e164a4a6b04710"}
Nov 22 10:42:53 crc kubenswrapper[4938]: I1122 10:42:53.089893 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gv775" podStartSLOduration=26.67552592 podStartE2EDuration="2m22.089867096s" podCreationTimestamp="2025-11-22 10:40:31 +0000 UTC" firstStartedPulling="2025-11-22 10:40:33.131149812 +0000 UTC m=+165.598987201" lastFinishedPulling="2025-11-22 10:42:28.545490978 +0000 UTC m=+281.013328377" observedRunningTime="2025-11-22 10:42:53.088000337 +0000 UTC m=+305.555837746" watchObservedRunningTime="2025-11-22 10:42:53.089867096 +0000 UTC m=+305.557704515"
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.073231 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l956q" event={"ID":"80700ea3-9e71-45b9-8896-12c1ed5d4d00","Type":"ContainerStarted","Data":"69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63"}
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.075418 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcz58" event={"ID":"372bd14a-9e2d-4320-92e4-534c44542975","Type":"ContainerStarted","Data":"bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd"}
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.078356 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbjm" event={"ID":"4381753c-a419-4f63-acc0-6b1bc5529b75","Type":"ContainerStarted","Data":"fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b"}
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.079902 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hwr5f" event={"ID":"ed2a3c1c-2ab4-48b9-991b-38847c71f996","Type":"ContainerStarted","Data":"54bd749a309c29e66a5145025b99e2a21a81ca05ace25b730626e4c7351d589b"}
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.082251 4938 generic.go:334] "Generic (PLEG): container finished" podID="c013bd6c-811a-4807-97cc-d5023243b5e3" containerID="e98669a22b93207d32a9b9cc9031b6f5762b99249be0eeb477388d7df05a26fb" exitCode=0
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.082310 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99zh8" event={"ID":"c013bd6c-811a-4807-97cc-d5023243b5e3","Type":"ContainerDied","Data":"e98669a22b93207d32a9b9cc9031b6f5762b99249be0eeb477388d7df05a26fb"}
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.086596 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dh2qc" event={"ID":"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db","Type":"ContainerStarted","Data":"0dff5b473d83799a75c3b1f450aed5c53390c49c557015727c07e3e069480c27"}
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.093560 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pcrdf" event={"ID":"f5f69615-9ff1-4803-95de-28529afe284f","Type":"ContainerStarted","Data":"324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d"}
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.112010 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dh2qc" podStartSLOduration=3.207510859 podStartE2EDuration="2m23.111993564s" podCreationTimestamp="2025-11-22 10:40:31 +0000 UTC" firstStartedPulling="2025-11-22 10:40:33.10748314 +0000 UTC m=+165.575320539" lastFinishedPulling="2025-11-22 10:42:53.011965805 +0000 UTC m=+305.479803244" observedRunningTime="2025-11-22 10:42:54.111382718 +0000 UTC m=+306.579220117" watchObservedRunningTime="2025-11-22 10:42:54.111993564 +0000 UTC m=+306.579830963"
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.115683 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-l956q" podStartSLOduration=6.590509664 podStartE2EDuration="2m20.115661758s" podCreationTimestamp="2025-11-22 10:40:34 +0000 UTC" firstStartedPulling="2025-11-22 10:40:39.297341101 +0000 UTC m=+171.765178500" lastFinishedPulling="2025-11-22 10:42:52.822493195 +0000 UTC m=+305.290330594" observedRunningTime="2025-11-22 10:42:54.093425725 +0000 UTC m=+306.561263134" watchObservedRunningTime="2025-11-22 10:42:54.115661758 +0000 UTC m=+306.583499157"
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.136511 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tqbjm" podStartSLOduration=3.251447662 podStartE2EDuration="2m23.136493976s" podCreationTimestamp="2025-11-22 10:40:31 +0000 UTC" firstStartedPulling="2025-11-22 10:40:33.128333428 +0000 UTC m=+165.596170817" lastFinishedPulling="2025-11-22 10:42:53.013379702 +0000 UTC m=+305.481217131" observedRunningTime="2025-11-22 10:42:54.132544414 +0000 UTC m=+306.600381813" watchObservedRunningTime="2025-11-22 10:42:54.136493976 +0000 UTC m=+306.604331385"
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.152412 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jcz58" podStartSLOduration=3.33884705 podStartE2EDuration="2m23.152393346s" podCreationTimestamp="2025-11-22 10:40:31 +0000 UTC" firstStartedPulling="2025-11-22 10:40:33.111610079 +0000 UTC m=+165.579447478" lastFinishedPulling="2025-11-22 10:42:52.925156335 +0000 UTC m=+305.392993774" observedRunningTime="2025-11-22 10:42:54.14595199 +0000 UTC m=+306.613789389" watchObservedRunningTime="2025-11-22 10:42:54.152393346 +0000 UTC m=+306.620230745"
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.196864 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pcrdf" podStartSLOduration=2.527171996 podStartE2EDuration="2m21.196837383s" podCreationTimestamp="2025-11-22 10:40:33 +0000 UTC" firstStartedPulling="2025-11-22 10:40:34.152612233 +0000 UTC m=+166.620449632" lastFinishedPulling="2025-11-22 10:42:52.82227762 +0000 UTC m=+305.290115019" observedRunningTime="2025-11-22 10:42:54.193742363 +0000 UTC m=+306.661579772" watchObservedRunningTime="2025-11-22 10:42:54.196837383 +0000 UTC m=+306.664674782"
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.803474 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:42:54 crc kubenswrapper[4938]: I1122 10:42:54.804374 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-l956q"
Nov 22 10:42:55 crc kubenswrapper[4938]: I1122 10:42:55.099904 4938 generic.go:334] "Generic (PLEG): container finished" podID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" containerID="54bd749a309c29e66a5145025b99e2a21a81ca05ace25b730626e4c7351d589b" exitCode=0
Nov 22 10:42:55 crc kubenswrapper[4938]: I1122 10:42:55.099979 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hwr5f" event={"ID":"ed2a3c1c-2ab4-48b9-991b-38847c71f996","Type":"ContainerDied","Data":"54bd749a309c29e66a5145025b99e2a21a81ca05ace25b730626e4c7351d589b"}
Nov 22 10:42:56 crc kubenswrapper[4938]: I1122 10:42:56.107350 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99zh8" event={"ID":"c013bd6c-811a-4807-97cc-d5023243b5e3","Type":"ContainerStarted","Data":"f7730f5556382b36fc05e7d840eb9f3da405bbbc47b892eea06e79b486b04808"}
Nov 22 10:42:56 crc kubenswrapper[4938]: I1122 10:42:56.123273 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-99zh8" podStartSLOduration=3.161330951 podStartE2EDuration="2m23.123254288s" podCreationTimestamp="2025-11-22 10:40:33 +0000 UTC" firstStartedPulling="2025-11-22 10:40:35.169220677 +0000 UTC m=+167.637058076" lastFinishedPulling="2025-11-22 10:42:55.131144024 +0000 UTC m=+307.598981413" observedRunningTime="2025-11-22 10:42:56.122461397 +0000 UTC m=+308.590298796" watchObservedRunningTime="2025-11-22 10:42:56.123254288 +0000 UTC m=+308.591091687"
Nov 22 10:42:56 crc kubenswrapper[4938]: I1122 10:42:56.313345 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-l956q" podUID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" containerName="registry-server" probeResult="failure" output=<
Nov 22 10:42:56 crc kubenswrapper[4938]: timeout: failed to connect service ":50051" within 1s
Nov 22 10:42:56 crc kubenswrapper[4938]: >
Nov 22 10:42:57 crc kubenswrapper[4938]: I1122 10:42:57.114045 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hwr5f" event={"ID":"ed2a3c1c-2ab4-48b9-991b-38847c71f996","Type":"ContainerStarted","Data":"c3d1c065edf43fd02989f8f9beb2b12bd04b95213634502d25ad620f7c15cc6e"}
Nov 22 10:42:57 crc kubenswrapper[4938]: I1122 10:42:57.134099 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hwr5f" podStartSLOduration=7.998039616 podStartE2EDuration="2m23.134080264s" podCreationTimestamp="2025-11-22 10:40:34 +0000 UTC" firstStartedPulling="2025-11-22 10:40:41.312845353 +0000 UTC m=+173.780682752" lastFinishedPulling="2025-11-22 10:42:56.448886001 +0000 UTC m=+308.916723400" observedRunningTime="2025-11-22 10:42:57.131274342 +0000 UTC m=+309.599111761" watchObservedRunningTime="2025-11-22 10:42:57.134080264 +0000 UTC m=+309.601917663"
Nov 22 10:43:01 crc kubenswrapper[4938]: I1122 10:43:01.916981 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tqbjm"
Nov 22 10:43:01 crc kubenswrapper[4938]: I1122 10:43:01.917390 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tqbjm"
Nov 22 10:43:01 crc kubenswrapper[4938]: I1122 10:43:01.938898 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jcz58"
Nov 22 10:43:01 crc kubenswrapper[4938]: I1122 10:43:01.938957 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jcz58"
Nov 22 10:43:01 crc kubenswrapper[4938]: I1122 10:43:01.965633 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gv775"
Nov 22 10:43:01 crc kubenswrapper[4938]: I1122 10:43:01.965701 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gv775"
Nov 22 10:43:02 crc kubenswrapper[4938]: I1122 10:43:02.229187 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:43:02 crc kubenswrapper[4938]: I1122 10:43:02.229262 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:43:02 crc kubenswrapper[4938]: I1122 10:43:02.279148 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tqbjm"
(probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:43:02 crc kubenswrapper[4938]: I1122 10:43:02.280131 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dh2qc" Nov 22 10:43:02 crc kubenswrapper[4938]: I1122 10:43:02.280229 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gv775" Nov 22 10:43:02 crc kubenswrapper[4938]: I1122 10:43:02.281387 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:43:02 crc kubenswrapper[4938]: I1122 10:43:02.317938 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:43:02 crc kubenswrapper[4938]: I1122 10:43:02.319565 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gv775" Nov 22 10:43:02 crc kubenswrapper[4938]: I1122 10:43:02.330679 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:43:03 crc kubenswrapper[4938]: I1122 10:43:03.197657 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dh2qc" Nov 22 10:43:03 crc kubenswrapper[4938]: I1122 10:43:03.587140 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pcrdf" Nov 22 10:43:03 crc kubenswrapper[4938]: I1122 10:43:03.587207 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pcrdf" Nov 22 10:43:03 crc kubenswrapper[4938]: I1122 10:43:03.649193 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pcrdf" Nov 22 10:43:03 crc kubenswrapper[4938]: I1122 10:43:03.987486 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-99zh8" Nov 22 10:43:03 crc kubenswrapper[4938]: I1122 10:43:03.987535 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-99zh8" Nov 22 10:43:04 crc kubenswrapper[4938]: I1122 10:43:04.031257 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-99zh8" Nov 22 10:43:04 crc kubenswrapper[4938]: I1122 10:43:04.104869 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gv775"] Nov 22 10:43:04 crc kubenswrapper[4938]: I1122 10:43:04.164981 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gv775" podUID="08937d01-18fd-41a6-a44d-5cae36807fc7" containerName="registry-server" containerID="cri-o://6c4d8104f4cf0287447b1bde313ddb268d84af761ddac939f2e164a4a6b04710" gracePeriod=2 Nov 22 10:43:04 crc kubenswrapper[4938]: I1122 10:43:04.212359 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-99zh8" Nov 22 10:43:04 crc kubenswrapper[4938]: I1122 10:43:04.212413 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pcrdf" Nov 22 10:43:04 crc kubenswrapper[4938]: I1122 10:43:04.706118 
4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dh2qc"] Nov 22 10:43:04 crc kubenswrapper[4938]: I1122 10:43:04.855235 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-l956q" Nov 22 10:43:04 crc kubenswrapper[4938]: I1122 10:43:04.903410 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-l956q" Nov 22 10:43:05 crc kubenswrapper[4938]: I1122 10:43:05.170808 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dh2qc" podUID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" containerName="registry-server" containerID="cri-o://0dff5b473d83799a75c3b1f450aed5c53390c49c557015727c07e3e069480c27" gracePeriod=2 Nov 22 10:43:05 crc kubenswrapper[4938]: I1122 10:43:05.276905 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hwr5f" Nov 22 10:43:05 crc kubenswrapper[4938]: I1122 10:43:05.277858 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hwr5f" Nov 22 10:43:05 crc kubenswrapper[4938]: I1122 10:43:05.344538 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hwr5f" Nov 22 10:43:06 crc kubenswrapper[4938]: I1122 10:43:06.217972 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hwr5f" Nov 22 10:43:06 crc kubenswrapper[4938]: I1122 10:43:06.505130 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-99zh8"] Nov 22 10:43:06 crc kubenswrapper[4938]: I1122 10:43:06.505371 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-99zh8" podUID="c013bd6c-811a-4807-97cc-d5023243b5e3" containerName="registry-server" containerID="cri-o://f7730f5556382b36fc05e7d840eb9f3da405bbbc47b892eea06e79b486b04808" gracePeriod=2 Nov 22 10:43:07 crc kubenswrapper[4938]: I1122 10:43:07.182659 4938 generic.go:334] "Generic (PLEG): container finished" podID="08937d01-18fd-41a6-a44d-5cae36807fc7" containerID="6c4d8104f4cf0287447b1bde313ddb268d84af761ddac939f2e164a4a6b04710" exitCode=0 Nov 22 10:43:07 crc kubenswrapper[4938]: I1122 10:43:07.182724 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gv775" event={"ID":"08937d01-18fd-41a6-a44d-5cae36807fc7","Type":"ContainerDied","Data":"6c4d8104f4cf0287447b1bde313ddb268d84af761ddac939f2e164a4a6b04710"} Nov 22 10:43:08 crc kubenswrapper[4938]: I1122 10:43:08.195561 4938 generic.go:334] "Generic (PLEG): container finished" podID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" containerID="0dff5b473d83799a75c3b1f450aed5c53390c49c557015727c07e3e069480c27" exitCode=0 Nov 22 10:43:08 crc kubenswrapper[4938]: I1122 10:43:08.195613 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dh2qc" event={"ID":"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db","Type":"ContainerDied","Data":"0dff5b473d83799a75c3b1f450aed5c53390c49c557015727c07e3e069480c27"} Nov 22 10:43:09 crc kubenswrapper[4938]: I1122 10:43:09.505862 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hwr5f"] Nov 22 10:43:09 crc kubenswrapper[4938]: I1122 10:43:09.506134 4938 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hwr5f" podUID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" containerName="registry-server" containerID="cri-o://c3d1c065edf43fd02989f8f9beb2b12bd04b95213634502d25ad620f7c15cc6e" gracePeriod=2 Nov 22 10:43:09 crc kubenswrapper[4938]: I1122 10:43:09.885060 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gv775" Nov 22 10:43:09 crc kubenswrapper[4938]: I1122 10:43:09.980202 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2s2z\" (UniqueName: \"kubernetes.io/projected/08937d01-18fd-41a6-a44d-5cae36807fc7-kube-api-access-d2s2z\") pod \"08937d01-18fd-41a6-a44d-5cae36807fc7\" (UID: \"08937d01-18fd-41a6-a44d-5cae36807fc7\") " Nov 22 10:43:09 crc kubenswrapper[4938]: I1122 10:43:09.980255 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08937d01-18fd-41a6-a44d-5cae36807fc7-utilities\") pod \"08937d01-18fd-41a6-a44d-5cae36807fc7\" (UID: \"08937d01-18fd-41a6-a44d-5cae36807fc7\") " Nov 22 10:43:09 crc kubenswrapper[4938]: I1122 10:43:09.980284 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08937d01-18fd-41a6-a44d-5cae36807fc7-catalog-content\") pod \"08937d01-18fd-41a6-a44d-5cae36807fc7\" (UID: \"08937d01-18fd-41a6-a44d-5cae36807fc7\") " Nov 22 10:43:09 crc kubenswrapper[4938]: I1122 10:43:09.981413 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08937d01-18fd-41a6-a44d-5cae36807fc7-utilities" (OuterVolumeSpecName: "utilities") pod "08937d01-18fd-41a6-a44d-5cae36807fc7" (UID: "08937d01-18fd-41a6-a44d-5cae36807fc7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:43:09 crc kubenswrapper[4938]: I1122 10:43:09.985291 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08937d01-18fd-41a6-a44d-5cae36807fc7-kube-api-access-d2s2z" (OuterVolumeSpecName: "kube-api-access-d2s2z") pod "08937d01-18fd-41a6-a44d-5cae36807fc7" (UID: "08937d01-18fd-41a6-a44d-5cae36807fc7"). InnerVolumeSpecName "kube-api-access-d2s2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:43:10 crc kubenswrapper[4938]: I1122 10:43:10.028683 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08937d01-18fd-41a6-a44d-5cae36807fc7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "08937d01-18fd-41a6-a44d-5cae36807fc7" (UID: "08937d01-18fd-41a6-a44d-5cae36807fc7"). InnerVolumeSpecName "catalog-content". 
Nov 22 10:43:10 crc kubenswrapper[4938]: I1122 10:43:10.081389 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2s2z\" (UniqueName: \"kubernetes.io/projected/08937d01-18fd-41a6-a44d-5cae36807fc7-kube-api-access-d2s2z\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:10 crc kubenswrapper[4938]: I1122 10:43:10.081418 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08937d01-18fd-41a6-a44d-5cae36807fc7-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:10 crc kubenswrapper[4938]: I1122 10:43:10.081428 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08937d01-18fd-41a6-a44d-5cae36807fc7-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:10 crc kubenswrapper[4938]: I1122 10:43:10.206820 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gv775" event={"ID":"08937d01-18fd-41a6-a44d-5cae36807fc7","Type":"ContainerDied","Data":"edacbdc9e6e635537e8c568c380a76341b5cc508316bc16275e83f898de7e1c4"}
Nov 22 10:43:10 crc kubenswrapper[4938]: I1122 10:43:10.206887 4938 scope.go:117] "RemoveContainer" containerID="6c4d8104f4cf0287447b1bde313ddb268d84af761ddac939f2e164a4a6b04710"
Nov 22 10:43:10 crc kubenswrapper[4938]: I1122 10:43:10.206903 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gv775"
Nov 22 10:43:10 crc kubenswrapper[4938]: I1122 10:43:10.226505 4938 scope.go:117] "RemoveContainer" containerID="1225ec6496589b789bc797d66f6f91a86725f75d6aa81c9f699525992bc45120"
Nov 22 10:43:10 crc kubenswrapper[4938]: I1122 10:43:10.232214 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gv775"]
Nov 22 10:43:10 crc kubenswrapper[4938]: I1122 10:43:10.234277 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gv775"]
Nov 22 10:43:10 crc kubenswrapper[4938]: I1122 10:43:10.260593 4938 scope.go:117] "RemoveContainer" containerID="64c2550968ea274048c722f5c64f5f9f455804798ad7445f8ecac0d4f4872a6d"
Nov 22 10:43:10 crc kubenswrapper[4938]: I1122 10:43:10.457598 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08937d01-18fd-41a6-a44d-5cae36807fc7" path="/var/lib/kubelet/pods/08937d01-18fd-41a6-a44d-5cae36807fc7/volumes"
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.079535 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.196931 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbwdm\" (UniqueName: \"kubernetes.io/projected/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-kube-api-access-kbwdm\") pod \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\" (UID: \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\") "
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.197346 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-catalog-content\") pod \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\" (UID: \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\") "
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.198423 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-utilities\") pod \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\" (UID: \"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db\") "
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.199279 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-utilities" (OuterVolumeSpecName: "utilities") pod "cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" (UID: "cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.200451 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-kube-api-access-kbwdm" (OuterVolumeSpecName: "kube-api-access-kbwdm") pod "cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" (UID: "cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db"). InnerVolumeSpecName "kube-api-access-kbwdm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.214559 4938 generic.go:334] "Generic (PLEG): container finished" podID="c013bd6c-811a-4807-97cc-d5023243b5e3" containerID="f7730f5556382b36fc05e7d840eb9f3da405bbbc47b892eea06e79b486b04808" exitCode=0
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.214636 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99zh8" event={"ID":"c013bd6c-811a-4807-97cc-d5023243b5e3","Type":"ContainerDied","Data":"f7730f5556382b36fc05e7d840eb9f3da405bbbc47b892eea06e79b486b04808"}
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.216534 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dh2qc" event={"ID":"cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db","Type":"ContainerDied","Data":"c059bfe008754534ac02459d8c8a3ff77f1f6187646c16fbdf7707ec1456f165"}
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.216590 4938 scope.go:117] "RemoveContainer" containerID="0dff5b473d83799a75c3b1f450aed5c53390c49c557015727c07e3e069480c27"
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.216589 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dh2qc"
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.231336 4938 scope.go:117] "RemoveContainer" containerID="cd9e69e40cda7764e45783b246ad6c9667754a5c7c9b134b9f0c720f55207073"
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.241387 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" (UID: "cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.247867 4938 scope.go:117] "RemoveContainer" containerID="d8f51ff5112981c09bf35da99ab36affda94932cb026dd13dbf59279f7c4ca19"
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.299614 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.299644 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.299653 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbwdm\" (UniqueName: \"kubernetes.io/projected/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db-kube-api-access-kbwdm\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.546841 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dh2qc"]
Nov 22 10:43:11 crc kubenswrapper[4938]: I1122 10:43:11.553051 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dh2qc"]
Nov 22 10:43:12 crc kubenswrapper[4938]: I1122 10:43:12.462959 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" path="/var/lib/kubelet/pods/cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db/volumes"
Nov 22 10:43:12 crc kubenswrapper[4938]: I1122 10:43:12.950528 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.122269 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vn6jf\" (UniqueName: \"kubernetes.io/projected/c013bd6c-811a-4807-97cc-d5023243b5e3-kube-api-access-vn6jf\") pod \"c013bd6c-811a-4807-97cc-d5023243b5e3\" (UID: \"c013bd6c-811a-4807-97cc-d5023243b5e3\") "
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.122428 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c013bd6c-811a-4807-97cc-d5023243b5e3-catalog-content\") pod \"c013bd6c-811a-4807-97cc-d5023243b5e3\" (UID: \"c013bd6c-811a-4807-97cc-d5023243b5e3\") "
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.122517 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c013bd6c-811a-4807-97cc-d5023243b5e3-utilities\") pod \"c013bd6c-811a-4807-97cc-d5023243b5e3\" (UID: \"c013bd6c-811a-4807-97cc-d5023243b5e3\") "
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.123975 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c013bd6c-811a-4807-97cc-d5023243b5e3-utilities" (OuterVolumeSpecName: "utilities") pod "c013bd6c-811a-4807-97cc-d5023243b5e3" (UID: "c013bd6c-811a-4807-97cc-d5023243b5e3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.131117 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c013bd6c-811a-4807-97cc-d5023243b5e3-kube-api-access-vn6jf" (OuterVolumeSpecName: "kube-api-access-vn6jf") pod "c013bd6c-811a-4807-97cc-d5023243b5e3" (UID: "c013bd6c-811a-4807-97cc-d5023243b5e3"). InnerVolumeSpecName "kube-api-access-vn6jf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.148830 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c013bd6c-811a-4807-97cc-d5023243b5e3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c013bd6c-811a-4807-97cc-d5023243b5e3" (UID: "c013bd6c-811a-4807-97cc-d5023243b5e3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.223397 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c013bd6c-811a-4807-97cc-d5023243b5e3-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.223427 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c013bd6c-811a-4807-97cc-d5023243b5e3-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.223437 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vn6jf\" (UniqueName: \"kubernetes.io/projected/c013bd6c-811a-4807-97cc-d5023243b5e3-kube-api-access-vn6jf\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.230163 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hwr5f_ed2a3c1c-2ab4-48b9-991b-38847c71f996/registry-server/0.log"
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.230759 4938 generic.go:334] "Generic (PLEG): container finished" podID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" containerID="c3d1c065edf43fd02989f8f9beb2b12bd04b95213634502d25ad620f7c15cc6e" exitCode=137
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.230805 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hwr5f" event={"ID":"ed2a3c1c-2ab4-48b9-991b-38847c71f996","Type":"ContainerDied","Data":"c3d1c065edf43fd02989f8f9beb2b12bd04b95213634502d25ad620f7c15cc6e"}
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.232321 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99zh8" event={"ID":"c013bd6c-811a-4807-97cc-d5023243b5e3","Type":"ContainerDied","Data":"1c4498f7e650dfc2c18742be9df55754e68ccbea1c7af7e0d302d4cb2fac848d"}
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.232350 4938 scope.go:117] "RemoveContainer" containerID="f7730f5556382b36fc05e7d840eb9f3da405bbbc47b892eea06e79b486b04808"
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.232482 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-99zh8"
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.245608 4938 scope.go:117] "RemoveContainer" containerID="e98669a22b93207d32a9b9cc9031b6f5762b99249be0eeb477388d7df05a26fb"
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.262087 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-99zh8"]
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.264957 4938 scope.go:117] "RemoveContainer" containerID="19dbfa8d1e471769ffd1c396f19d1ac5256e3a6ce285ac600c64d8a1c5c229de"
Nov 22 10:43:13 crc kubenswrapper[4938]: I1122 10:43:13.265527 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-99zh8"]
Nov 22 10:43:14 crc kubenswrapper[4938]: I1122 10:43:14.457478 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c013bd6c-811a-4807-97cc-d5023243b5e3" path="/var/lib/kubelet/pods/c013bd6c-811a-4807-97cc-d5023243b5e3/volumes"
Nov 22 10:43:14 crc kubenswrapper[4938]: I1122 10:43:14.887432 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hwr5f_ed2a3c1c-2ab4-48b9-991b-38847c71f996/registry-server/0.log"
Nov 22 10:43:14 crc kubenswrapper[4938]: I1122 10:43:14.888338 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.049775 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2a3c1c-2ab4-48b9-991b-38847c71f996-catalog-content\") pod \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\" (UID: \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\") "
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.049891 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2a3c1c-2ab4-48b9-991b-38847c71f996-utilities\") pod \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\" (UID: \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\") "
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.051741 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed2a3c1c-2ab4-48b9-991b-38847c71f996-utilities" (OuterVolumeSpecName: "utilities") pod "ed2a3c1c-2ab4-48b9-991b-38847c71f996" (UID: "ed2a3c1c-2ab4-48b9-991b-38847c71f996"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.051970 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lt92q\" (UniqueName: \"kubernetes.io/projected/ed2a3c1c-2ab4-48b9-991b-38847c71f996-kube-api-access-lt92q\") pod \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\" (UID: \"ed2a3c1c-2ab4-48b9-991b-38847c71f996\") "
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.053382 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2a3c1c-2ab4-48b9-991b-38847c71f996-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.056469 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed2a3c1c-2ab4-48b9-991b-38847c71f996-kube-api-access-lt92q" (OuterVolumeSpecName: "kube-api-access-lt92q") pod "ed2a3c1c-2ab4-48b9-991b-38847c71f996" (UID: "ed2a3c1c-2ab4-48b9-991b-38847c71f996"). InnerVolumeSpecName "kube-api-access-lt92q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.154762 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lt92q\" (UniqueName: \"kubernetes.io/projected/ed2a3c1c-2ab4-48b9-991b-38847c71f996-kube-api-access-lt92q\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.246770 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hwr5f_ed2a3c1c-2ab4-48b9-991b-38847c71f996/registry-server/0.log"
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.247529 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hwr5f" event={"ID":"ed2a3c1c-2ab4-48b9-991b-38847c71f996","Type":"ContainerDied","Data":"44404f02587bc4103fcd191f9d1c6725ee343be996355c69737cc6b6fd6013af"}
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.247582 4938 scope.go:117] "RemoveContainer" containerID="c3d1c065edf43fd02989f8f9beb2b12bd04b95213634502d25ad620f7c15cc6e"
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.247627 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hwr5f"
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.260242 4938 scope.go:117] "RemoveContainer" containerID="54bd749a309c29e66a5145025b99e2a21a81ca05ace25b730626e4c7351d589b"
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.276306 4938 scope.go:117] "RemoveContainer" containerID="b951df96b39f9185d9508d2dc749b04526b01e508cbf8dfc2d3728cfd352e9e3"
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.782977 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed2a3c1c-2ab4-48b9-991b-38847c71f996-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ed2a3c1c-2ab4-48b9-991b-38847c71f996" (UID: "ed2a3c1c-2ab4-48b9-991b-38847c71f996"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.863578 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2a3c1c-2ab4-48b9-991b-38847c71f996-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.898875 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hwr5f"]
Nov 22 10:43:15 crc kubenswrapper[4938]: I1122 10:43:15.902664 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hwr5f"]
Nov 22 10:43:16 crc kubenswrapper[4938]: I1122 10:43:16.456869 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" path="/var/lib/kubelet/pods/ed2a3c1c-2ab4-48b9-991b-38847c71f996/volumes"
Nov 22 10:44:11 crc kubenswrapper[4938]: I1122 10:44:11.301145 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 10:44:11 crc kubenswrapper[4938]: I1122 10:44:11.301589 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 10:44:41 crc kubenswrapper[4938]: I1122 10:44:41.300819 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 10:44:41 crc kubenswrapper[4938]: I1122 10:44:41.301524 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.129321 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58"]
Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130148 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" containerName="registry-server"
Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130164 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" containerName="registry-server"
Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130175 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a63f51d-82cc-4c0d-b847-9c3c93ab242c" containerName="pruner"
Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130183 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a63f51d-82cc-4c0d-b847-9c3c93ab242c" containerName="pruner"
Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130197 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08937d01-18fd-41a6-a44d-5cae36807fc7" containerName="extract-content"
Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130206 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="08937d01-18fd-41a6-a44d-5cae36807fc7" containerName="extract-content"
Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130214 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c013bd6c-811a-4807-97cc-d5023243b5e3" containerName="extract-utilities"
Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130222 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c013bd6c-811a-4807-97cc-d5023243b5e3" containerName="extract-utilities"
Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130232 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c013bd6c-811a-4807-97cc-d5023243b5e3" containerName="registry-server"
Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130239 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c013bd6c-811a-4807-97cc-d5023243b5e3" containerName="registry-server"
Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130249 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" containerName="extract-content"
Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130256 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" containerName="extract-content"
Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130264 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08937d01-18fd-41a6-a44d-5cae36807fc7" containerName="registry-server"
Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130492 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="08937d01-18fd-41a6-a44d-5cae36807fc7" containerName="registry-server"
Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130506 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" containerName="registry-server"
Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130515 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" containerName="registry-server"
Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130529 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c013bd6c-811a-4807-97cc-d5023243b5e3" containerName="extract-content"
Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130536 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c013bd6c-811a-4807-97cc-d5023243b5e3" containerName="extract-content"
Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130550 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" containerName="extract-content"
Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130557 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" containerName="extract-content"
Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130566 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08937d01-18fd-41a6-a44d-5cae36807fc7" containerName="extract-utilities"
Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130574 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="08937d01-18fd-41a6-a44d-5cae36807fc7" containerName="extract-utilities"
Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130587 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" containerName="extract-utilities"
podUID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" containerName="extract-utilities" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130595 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" containerName="extract-utilities" Nov 22 10:45:00 crc kubenswrapper[4938]: E1122 10:45:00.130610 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" containerName="extract-utilities" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130618 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" containerName="extract-utilities" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130732 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="c013bd6c-811a-4807-97cc-d5023243b5e3" containerName="registry-server" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130744 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="08937d01-18fd-41a6-a44d-5cae36807fc7" containerName="registry-server" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130757 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc3aa5b9-8485-4d6f-8deb-c3d3c10b58db" containerName="registry-server" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130774 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed2a3c1c-2ab4-48b9-991b-38847c71f996" containerName="registry-server" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.130783 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a63f51d-82cc-4c0d-b847-9c3c93ab242c" containerName="pruner" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.131263 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.134021 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.134340 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.141851 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58"] Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.200322 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7wxv\" (UniqueName: \"kubernetes.io/projected/ebae4252-359f-4d1a-9461-8991ec435c0f-kube-api-access-m7wxv\") pod \"collect-profiles-29396805-phk58\" (UID: \"ebae4252-359f-4d1a-9461-8991ec435c0f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.200389 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ebae4252-359f-4d1a-9461-8991ec435c0f-secret-volume\") pod \"collect-profiles-29396805-phk58\" (UID: \"ebae4252-359f-4d1a-9461-8991ec435c0f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.200558 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ebae4252-359f-4d1a-9461-8991ec435c0f-config-volume\") pod \"collect-profiles-29396805-phk58\" (UID: \"ebae4252-359f-4d1a-9461-8991ec435c0f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.302207 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7wxv\" (UniqueName: \"kubernetes.io/projected/ebae4252-359f-4d1a-9461-8991ec435c0f-kube-api-access-m7wxv\") pod \"collect-profiles-29396805-phk58\" (UID: \"ebae4252-359f-4d1a-9461-8991ec435c0f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.302554 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ebae4252-359f-4d1a-9461-8991ec435c0f-secret-volume\") pod \"collect-profiles-29396805-phk58\" (UID: \"ebae4252-359f-4d1a-9461-8991ec435c0f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.302616 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ebae4252-359f-4d1a-9461-8991ec435c0f-config-volume\") pod \"collect-profiles-29396805-phk58\" (UID: \"ebae4252-359f-4d1a-9461-8991ec435c0f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.304119 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ebae4252-359f-4d1a-9461-8991ec435c0f-config-volume\") pod 
\"collect-profiles-29396805-phk58\" (UID: \"ebae4252-359f-4d1a-9461-8991ec435c0f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.315302 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ebae4252-359f-4d1a-9461-8991ec435c0f-secret-volume\") pod \"collect-profiles-29396805-phk58\" (UID: \"ebae4252-359f-4d1a-9461-8991ec435c0f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.319172 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7wxv\" (UniqueName: \"kubernetes.io/projected/ebae4252-359f-4d1a-9461-8991ec435c0f-kube-api-access-m7wxv\") pod \"collect-profiles-29396805-phk58\" (UID: \"ebae4252-359f-4d1a-9461-8991ec435c0f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.461945 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.634663 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58"] Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.819594 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" event={"ID":"ebae4252-359f-4d1a-9461-8991ec435c0f","Type":"ContainerStarted","Data":"dae273297a76fdfbd54636b312efb2a0a3daa844151d2f2106f0c2a38c177ae7"} Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.819717 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" event={"ID":"ebae4252-359f-4d1a-9461-8991ec435c0f","Type":"ContainerStarted","Data":"03d5183b7a6053948f903a8dd9c9551a8f7e2750f0fb4a25b64679a6faa9a5a3"} Nov 22 10:45:00 crc kubenswrapper[4938]: I1122 10:45:00.835760 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" podStartSLOduration=0.835740454 podStartE2EDuration="835.740454ms" podCreationTimestamp="2025-11-22 10:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:45:00.833548296 +0000 UTC m=+433.301385705" watchObservedRunningTime="2025-11-22 10:45:00.835740454 +0000 UTC m=+433.303577853" Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.822241 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jcz58"] Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.823952 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jcz58" podUID="372bd14a-9e2d-4320-92e4-534c44542975" containerName="registry-server" containerID="cri-o://bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd" gracePeriod=30 Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.831570 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tqbjm"] Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.831868 4938 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/community-operators-tqbjm" podUID="4381753c-a419-4f63-acc0-6b1bc5529b75" containerName="registry-server" containerID="cri-o://fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b" gracePeriod=30 Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.836673 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-jg5zd"] Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.836898 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" podUID="311f1063-1ead-4575-adce-cbf298b713b0" containerName="marketplace-operator" containerID="cri-o://68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e" gracePeriod=30 Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.838041 4938 generic.go:334] "Generic (PLEG): container finished" podID="ebae4252-359f-4d1a-9461-8991ec435c0f" containerID="dae273297a76fdfbd54636b312efb2a0a3daa844151d2f2106f0c2a38c177ae7" exitCode=0 Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.838077 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" event={"ID":"ebae4252-359f-4d1a-9461-8991ec435c0f","Type":"ContainerDied","Data":"dae273297a76fdfbd54636b312efb2a0a3daa844151d2f2106f0c2a38c177ae7"} Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.856509 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hscj2"] Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.857435 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.860677 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pcrdf"] Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.861527 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pcrdf" podUID="f5f69615-9ff1-4803-95de-28529afe284f" containerName="registry-server" containerID="cri-o://324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d" gracePeriod=30 Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.877351 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hscj2"] Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.883144 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l956q"] Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.883350 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-l956q" podUID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" containerName="registry-server" containerID="cri-o://69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63" gracePeriod=30 Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.930515 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/88051af2-c7b9-45b2-a1a5-2c1a025a271b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-hscj2\" (UID: \"88051af2-c7b9-45b2-a1a5-2c1a025a271b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 
10:45:01.930579 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/88051af2-c7b9-45b2-a1a5-2c1a025a271b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-hscj2\" (UID: \"88051af2-c7b9-45b2-a1a5-2c1a025a271b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 10:45:01 crc kubenswrapper[4938]: I1122 10:45:01.930615 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2jtm\" (UniqueName: \"kubernetes.io/projected/88051af2-c7b9-45b2-a1a5-2c1a025a271b-kube-api-access-g2jtm\") pod \"marketplace-operator-79b997595-hscj2\" (UID: \"88051af2-c7b9-45b2-a1a5-2c1a025a271b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 10:45:01 crc kubenswrapper[4938]: E1122 10:45:01.930722 4938 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b is running failed: container process not found" containerID="fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 10:45:01 crc kubenswrapper[4938]: E1122 10:45:01.931608 4938 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b is running failed: container process not found" containerID="fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 10:45:01 crc kubenswrapper[4938]: E1122 10:45:01.932730 4938 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b is running failed: container process not found" containerID="fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 10:45:01 crc kubenswrapper[4938]: E1122 10:45:01.932765 4938 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-tqbjm" podUID="4381753c-a419-4f63-acc0-6b1bc5529b75" containerName="registry-server" Nov 22 10:45:01 crc kubenswrapper[4938]: E1122 10:45:01.939176 4938 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd is running failed: container process not found" containerID="bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 10:45:01 crc kubenswrapper[4938]: E1122 10:45:01.939752 4938 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd is running failed: container process not found" containerID="bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd" 
cmd=["grpc_health_probe","-addr=:50051"] Nov 22 10:45:01 crc kubenswrapper[4938]: E1122 10:45:01.940140 4938 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd is running failed: container process not found" containerID="bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 10:45:01 crc kubenswrapper[4938]: E1122 10:45:01.940173 4938 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-jcz58" podUID="372bd14a-9e2d-4320-92e4-534c44542975" containerName="registry-server" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.032272 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/88051af2-c7b9-45b2-a1a5-2c1a025a271b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-hscj2\" (UID: \"88051af2-c7b9-45b2-a1a5-2c1a025a271b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.032372 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/88051af2-c7b9-45b2-a1a5-2c1a025a271b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-hscj2\" (UID: \"88051af2-c7b9-45b2-a1a5-2c1a025a271b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.032424 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2jtm\" (UniqueName: \"kubernetes.io/projected/88051af2-c7b9-45b2-a1a5-2c1a025a271b-kube-api-access-g2jtm\") pod \"marketplace-operator-79b997595-hscj2\" (UID: \"88051af2-c7b9-45b2-a1a5-2c1a025a271b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.036496 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/88051af2-c7b9-45b2-a1a5-2c1a025a271b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-hscj2\" (UID: \"88051af2-c7b9-45b2-a1a5-2c1a025a271b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.041097 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/88051af2-c7b9-45b2-a1a5-2c1a025a271b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-hscj2\" (UID: \"88051af2-c7b9-45b2-a1a5-2c1a025a271b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.055871 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2jtm\" (UniqueName: \"kubernetes.io/projected/88051af2-c7b9-45b2-a1a5-2c1a025a271b-kube-api-access-g2jtm\") pod \"marketplace-operator-79b997595-hscj2\" (UID: \"88051af2-c7b9-45b2-a1a5-2c1a025a271b\") " pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 
10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.234358 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.303772 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.311944 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.358186 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.359697 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l956q" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.365193 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pcrdf" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437072 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/311f1063-1ead-4575-adce-cbf298b713b0-marketplace-trusted-ca\") pod \"311f1063-1ead-4575-adce-cbf298b713b0\" (UID: \"311f1063-1ead-4575-adce-cbf298b713b0\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437123 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5f69615-9ff1-4803-95de-28529afe284f-catalog-content\") pod \"f5f69615-9ff1-4803-95de-28529afe284f\" (UID: \"f5f69615-9ff1-4803-95de-28529afe284f\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437142 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4381753c-a419-4f63-acc0-6b1bc5529b75-catalog-content\") pod \"4381753c-a419-4f63-acc0-6b1bc5529b75\" (UID: \"4381753c-a419-4f63-acc0-6b1bc5529b75\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437199 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-97p8p\" (UniqueName: \"kubernetes.io/projected/4381753c-a419-4f63-acc0-6b1bc5529b75-kube-api-access-97p8p\") pod \"4381753c-a419-4f63-acc0-6b1bc5529b75\" (UID: \"4381753c-a419-4f63-acc0-6b1bc5529b75\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437219 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80700ea3-9e71-45b9-8896-12c1ed5d4d00-catalog-content\") pod \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\" (UID: \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437236 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/372bd14a-9e2d-4320-92e4-534c44542975-utilities\") pod \"372bd14a-9e2d-4320-92e4-534c44542975\" (UID: \"372bd14a-9e2d-4320-92e4-534c44542975\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437282 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z48k2\" (UniqueName: 
\"kubernetes.io/projected/80700ea3-9e71-45b9-8896-12c1ed5d4d00-kube-api-access-z48k2\") pod \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\" (UID: \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437303 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80700ea3-9e71-45b9-8896-12c1ed5d4d00-utilities\") pod \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\" (UID: \"80700ea3-9e71-45b9-8896-12c1ed5d4d00\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437330 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4381753c-a419-4f63-acc0-6b1bc5529b75-utilities\") pod \"4381753c-a419-4f63-acc0-6b1bc5529b75\" (UID: \"4381753c-a419-4f63-acc0-6b1bc5529b75\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437349 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdd9b\" (UniqueName: \"kubernetes.io/projected/372bd14a-9e2d-4320-92e4-534c44542975-kube-api-access-kdd9b\") pod \"372bd14a-9e2d-4320-92e4-534c44542975\" (UID: \"372bd14a-9e2d-4320-92e4-534c44542975\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437366 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5f69615-9ff1-4803-95de-28529afe284f-utilities\") pod \"f5f69615-9ff1-4803-95de-28529afe284f\" (UID: \"f5f69615-9ff1-4803-95de-28529afe284f\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437395 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xf54z\" (UniqueName: \"kubernetes.io/projected/f5f69615-9ff1-4803-95de-28529afe284f-kube-api-access-xf54z\") pod \"f5f69615-9ff1-4803-95de-28529afe284f\" (UID: \"f5f69615-9ff1-4803-95de-28529afe284f\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437419 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg92v\" (UniqueName: \"kubernetes.io/projected/311f1063-1ead-4575-adce-cbf298b713b0-kube-api-access-qg92v\") pod \"311f1063-1ead-4575-adce-cbf298b713b0\" (UID: \"311f1063-1ead-4575-adce-cbf298b713b0\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437444 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/311f1063-1ead-4575-adce-cbf298b713b0-marketplace-operator-metrics\") pod \"311f1063-1ead-4575-adce-cbf298b713b0\" (UID: \"311f1063-1ead-4575-adce-cbf298b713b0\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.437464 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/372bd14a-9e2d-4320-92e4-534c44542975-catalog-content\") pod \"372bd14a-9e2d-4320-92e4-534c44542975\" (UID: \"372bd14a-9e2d-4320-92e4-534c44542975\") " Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.440497 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4381753c-a419-4f63-acc0-6b1bc5529b75-utilities" (OuterVolumeSpecName: "utilities") pod "4381753c-a419-4f63-acc0-6b1bc5529b75" (UID: "4381753c-a419-4f63-acc0-6b1bc5529b75"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.440941 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/372bd14a-9e2d-4320-92e4-534c44542975-utilities" (OuterVolumeSpecName: "utilities") pod "372bd14a-9e2d-4320-92e4-534c44542975" (UID: "372bd14a-9e2d-4320-92e4-534c44542975"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.441085 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80700ea3-9e71-45b9-8896-12c1ed5d4d00-utilities" (OuterVolumeSpecName: "utilities") pod "80700ea3-9e71-45b9-8896-12c1ed5d4d00" (UID: "80700ea3-9e71-45b9-8896-12c1ed5d4d00"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.441179 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/311f1063-1ead-4575-adce-cbf298b713b0-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "311f1063-1ead-4575-adce-cbf298b713b0" (UID: "311f1063-1ead-4575-adce-cbf298b713b0"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.443809 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/372bd14a-9e2d-4320-92e4-534c44542975-kube-api-access-kdd9b" (OuterVolumeSpecName: "kube-api-access-kdd9b") pod "372bd14a-9e2d-4320-92e4-534c44542975" (UID: "372bd14a-9e2d-4320-92e4-534c44542975"). InnerVolumeSpecName "kube-api-access-kdd9b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.444050 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4381753c-a419-4f63-acc0-6b1bc5529b75-kube-api-access-97p8p" (OuterVolumeSpecName: "kube-api-access-97p8p") pod "4381753c-a419-4f63-acc0-6b1bc5529b75" (UID: "4381753c-a419-4f63-acc0-6b1bc5529b75"). InnerVolumeSpecName "kube-api-access-97p8p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.444740 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80700ea3-9e71-45b9-8896-12c1ed5d4d00-kube-api-access-z48k2" (OuterVolumeSpecName: "kube-api-access-z48k2") pod "80700ea3-9e71-45b9-8896-12c1ed5d4d00" (UID: "80700ea3-9e71-45b9-8896-12c1ed5d4d00"). InnerVolumeSpecName "kube-api-access-z48k2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.450454 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/311f1063-1ead-4575-adce-cbf298b713b0-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "311f1063-1ead-4575-adce-cbf298b713b0" (UID: "311f1063-1ead-4575-adce-cbf298b713b0"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.451808 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/311f1063-1ead-4575-adce-cbf298b713b0-kube-api-access-qg92v" (OuterVolumeSpecName: "kube-api-access-qg92v") pod "311f1063-1ead-4575-adce-cbf298b713b0" (UID: "311f1063-1ead-4575-adce-cbf298b713b0"). InnerVolumeSpecName "kube-api-access-qg92v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.453099 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5f69615-9ff1-4803-95de-28529afe284f-utilities" (OuterVolumeSpecName: "utilities") pod "f5f69615-9ff1-4803-95de-28529afe284f" (UID: "f5f69615-9ff1-4803-95de-28529afe284f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.455349 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5f69615-9ff1-4803-95de-28529afe284f-kube-api-access-xf54z" (OuterVolumeSpecName: "kube-api-access-xf54z") pod "f5f69615-9ff1-4803-95de-28529afe284f" (UID: "f5f69615-9ff1-4803-95de-28529afe284f"). InnerVolumeSpecName "kube-api-access-xf54z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.477273 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5f69615-9ff1-4803-95de-28529afe284f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f5f69615-9ff1-4803-95de-28529afe284f" (UID: "f5f69615-9ff1-4803-95de-28529afe284f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.512860 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hscj2"] Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.519991 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4381753c-a419-4f63-acc0-6b1bc5529b75-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4381753c-a419-4f63-acc0-6b1bc5529b75" (UID: "4381753c-a419-4f63-acc0-6b1bc5529b75"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.523817 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/372bd14a-9e2d-4320-92e4-534c44542975-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "372bd14a-9e2d-4320-92e4-534c44542975" (UID: "372bd14a-9e2d-4320-92e4-534c44542975"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.538965 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-97p8p\" (UniqueName: \"kubernetes.io/projected/4381753c-a419-4f63-acc0-6b1bc5529b75-kube-api-access-97p8p\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.538993 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/372bd14a-9e2d-4320-92e4-534c44542975-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.539003 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z48k2\" (UniqueName: \"kubernetes.io/projected/80700ea3-9e71-45b9-8896-12c1ed5d4d00-kube-api-access-z48k2\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.539013 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80700ea3-9e71-45b9-8896-12c1ed5d4d00-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.539022 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4381753c-a419-4f63-acc0-6b1bc5529b75-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.539030 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdd9b\" (UniqueName: \"kubernetes.io/projected/372bd14a-9e2d-4320-92e4-534c44542975-kube-api-access-kdd9b\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.539037 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5f69615-9ff1-4803-95de-28529afe284f-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.539045 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xf54z\" (UniqueName: \"kubernetes.io/projected/f5f69615-9ff1-4803-95de-28529afe284f-kube-api-access-xf54z\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.539053 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg92v\" (UniqueName: \"kubernetes.io/projected/311f1063-1ead-4575-adce-cbf298b713b0-kube-api-access-qg92v\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.539062 4938 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/311f1063-1ead-4575-adce-cbf298b713b0-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.539071 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/372bd14a-9e2d-4320-92e4-534c44542975-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.539081 4938 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/311f1063-1ead-4575-adce-cbf298b713b0-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.539089 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/4381753c-a419-4f63-acc0-6b1bc5529b75-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.539098 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5f69615-9ff1-4803-95de-28529afe284f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.555449 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80700ea3-9e71-45b9-8896-12c1ed5d4d00-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "80700ea3-9e71-45b9-8896-12c1ed5d4d00" (UID: "80700ea3-9e71-45b9-8896-12c1ed5d4d00"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.639654 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80700ea3-9e71-45b9-8896-12c1ed5d4d00-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.845039 4938 generic.go:334] "Generic (PLEG): container finished" podID="f5f69615-9ff1-4803-95de-28529afe284f" containerID="324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d" exitCode=0 Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.845099 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pcrdf" event={"ID":"f5f69615-9ff1-4803-95de-28529afe284f","Type":"ContainerDied","Data":"324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d"} Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.845127 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pcrdf" event={"ID":"f5f69615-9ff1-4803-95de-28529afe284f","Type":"ContainerDied","Data":"66cc4a05e7b5b824b574b882a8d03b7f3f982627c941aafc2af819eb8dc6845e"} Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.845143 4938 scope.go:117] "RemoveContainer" containerID="324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.845282 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pcrdf" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.849555 4938 generic.go:334] "Generic (PLEG): container finished" podID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" containerID="69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63" exitCode=0 Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.849627 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l956q" event={"ID":"80700ea3-9e71-45b9-8896-12c1ed5d4d00","Type":"ContainerDied","Data":"69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63"} Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.849656 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l956q" event={"ID":"80700ea3-9e71-45b9-8896-12c1ed5d4d00","Type":"ContainerDied","Data":"a85692ab917b8f5bc5b350e4c29866dad38a861c914a5e05b64fb5f4931c1d9b"} Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.849853 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l956q" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.851284 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" event={"ID":"88051af2-c7b9-45b2-a1a5-2c1a025a271b","Type":"ContainerStarted","Data":"b311e07dc1243e211bb9e44f0dc6e47fe7fc709324bed0828461859da155de8a"} Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.851353 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" event={"ID":"88051af2-c7b9-45b2-a1a5-2c1a025a271b","Type":"ContainerStarted","Data":"d2aff61e07789363a805d35508ae17ce47ddc850bb8b8983dbe0ac067059061e"} Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.851656 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.852438 4938 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-hscj2 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.55:8080/healthz\": dial tcp 10.217.0.55:8080: connect: connection refused" start-of-body= Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.852469 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" podUID="88051af2-c7b9-45b2-a1a5-2c1a025a271b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.55:8080/healthz\": dial tcp 10.217.0.55:8080: connect: connection refused" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.853336 4938 generic.go:334] "Generic (PLEG): container finished" podID="311f1063-1ead-4575-adce-cbf298b713b0" containerID="68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e" exitCode=0 Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.853391 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" event={"ID":"311f1063-1ead-4575-adce-cbf298b713b0","Type":"ContainerDied","Data":"68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e"} Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.853428 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" event={"ID":"311f1063-1ead-4575-adce-cbf298b713b0","Type":"ContainerDied","Data":"f02ff52c9e04d22ee09987b0322f2cee2ef2a6a72f032105f056d2550a7bdf04"} Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.853463 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-jg5zd" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.855756 4938 generic.go:334] "Generic (PLEG): container finished" podID="372bd14a-9e2d-4320-92e4-534c44542975" containerID="bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd" exitCode=0 Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.855819 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jcz58" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.855826 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcz58" event={"ID":"372bd14a-9e2d-4320-92e4-534c44542975","Type":"ContainerDied","Data":"bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd"} Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.855931 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcz58" event={"ID":"372bd14a-9e2d-4320-92e4-534c44542975","Type":"ContainerDied","Data":"c5e5587e5d055d495540daf404d72586ab15443a5d19b70d39af03a8672c0ac9"} Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.859529 4938 generic.go:334] "Generic (PLEG): container finished" podID="4381753c-a419-4f63-acc0-6b1bc5529b75" containerID="fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b" exitCode=0 Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.859774 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tqbjm" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.862556 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbjm" event={"ID":"4381753c-a419-4f63-acc0-6b1bc5529b75","Type":"ContainerDied","Data":"fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b"} Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.862639 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tqbjm" event={"ID":"4381753c-a419-4f63-acc0-6b1bc5529b75","Type":"ContainerDied","Data":"4983d09124f47004182739f4b828b721eead49f83c47d86c9e21a0e475e4bb53"} Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.876149 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" podStartSLOduration=1.876109561 podStartE2EDuration="1.876109561s" podCreationTimestamp="2025-11-22 10:45:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:45:02.872568127 +0000 UTC m=+435.340405536" watchObservedRunningTime="2025-11-22 10:45:02.876109561 +0000 UTC m=+435.343946960" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.880295 4938 scope.go:117] "RemoveContainer" containerID="a7c2afdc77ce71eccce835986d6979664cdf02ab602dbc9b97c0d74733bdb7fa" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.896124 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-jg5zd"] Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.901829 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-jg5zd"] Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.920984 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l956q"] Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.922123 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-l956q"] Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.931252 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tqbjm"] Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.937442 4938 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tqbjm"] Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.939172 4938 scope.go:117] "RemoveContainer" containerID="f48b4bcccd732bf0210ca67bea06de28e3fdc7dc5688094ea49c2326beb4c16d" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.960466 4938 scope.go:117] "RemoveContainer" containerID="324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d" Nov 22 10:45:02 crc kubenswrapper[4938]: E1122 10:45:02.960966 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d\": container with ID starting with 324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d not found: ID does not exist" containerID="324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.961001 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d"} err="failed to get container status \"324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d\": rpc error: code = NotFound desc = could not find container \"324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d\": container with ID starting with 324717896d9da8a89187f1e9bac97624dd7dcac129764bb50cd849980fba2e2d not found: ID does not exist" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.961034 4938 scope.go:117] "RemoveContainer" containerID="a7c2afdc77ce71eccce835986d6979664cdf02ab602dbc9b97c0d74733bdb7fa" Nov 22 10:45:02 crc kubenswrapper[4938]: E1122 10:45:02.961427 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7c2afdc77ce71eccce835986d6979664cdf02ab602dbc9b97c0d74733bdb7fa\": container with ID starting with a7c2afdc77ce71eccce835986d6979664cdf02ab602dbc9b97c0d74733bdb7fa not found: ID does not exist" containerID="a7c2afdc77ce71eccce835986d6979664cdf02ab602dbc9b97c0d74733bdb7fa" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.961451 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7c2afdc77ce71eccce835986d6979664cdf02ab602dbc9b97c0d74733bdb7fa"} err="failed to get container status \"a7c2afdc77ce71eccce835986d6979664cdf02ab602dbc9b97c0d74733bdb7fa\": rpc error: code = NotFound desc = could not find container \"a7c2afdc77ce71eccce835986d6979664cdf02ab602dbc9b97c0d74733bdb7fa\": container with ID starting with a7c2afdc77ce71eccce835986d6979664cdf02ab602dbc9b97c0d74733bdb7fa not found: ID does not exist" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.961472 4938 scope.go:117] "RemoveContainer" containerID="f48b4bcccd732bf0210ca67bea06de28e3fdc7dc5688094ea49c2326beb4c16d" Nov 22 10:45:02 crc kubenswrapper[4938]: E1122 10:45:02.961764 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f48b4bcccd732bf0210ca67bea06de28e3fdc7dc5688094ea49c2326beb4c16d\": container with ID starting with f48b4bcccd732bf0210ca67bea06de28e3fdc7dc5688094ea49c2326beb4c16d not found: ID does not exist" containerID="f48b4bcccd732bf0210ca67bea06de28e3fdc7dc5688094ea49c2326beb4c16d" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.961780 4938 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"f48b4bcccd732bf0210ca67bea06de28e3fdc7dc5688094ea49c2326beb4c16d"} err="failed to get container status \"f48b4bcccd732bf0210ca67bea06de28e3fdc7dc5688094ea49c2326beb4c16d\": rpc error: code = NotFound desc = could not find container \"f48b4bcccd732bf0210ca67bea06de28e3fdc7dc5688094ea49c2326beb4c16d\": container with ID starting with f48b4bcccd732bf0210ca67bea06de28e3fdc7dc5688094ea49c2326beb4c16d not found: ID does not exist" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.961791 4938 scope.go:117] "RemoveContainer" containerID="69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63" Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.976346 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jcz58"] Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.976408 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jcz58"] Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.976423 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pcrdf"] Nov 22 10:45:02 crc kubenswrapper[4938]: I1122 10:45:02.979506 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pcrdf"] Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.017898 4938 scope.go:117] "RemoveContainer" containerID="1eb2f59a2a71aada26c8a3db3e70803fe5242f58fcf4eaa9402a230bd402cac5" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.033376 4938 scope.go:117] "RemoveContainer" containerID="57977bc8313c73133e1362be24eb0d04f996e2d701c9dcc39e1572e563a84de2" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.049131 4938 scope.go:117] "RemoveContainer" containerID="69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63" Nov 22 10:45:03 crc kubenswrapper[4938]: E1122 10:45:03.049706 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63\": container with ID starting with 69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63 not found: ID does not exist" containerID="69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.049778 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63"} err="failed to get container status \"69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63\": rpc error: code = NotFound desc = could not find container \"69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63\": container with ID starting with 69cf2a8d3d124450a88e9f8051311f8f11b1b668450628305538bcfd1a050c63 not found: ID does not exist" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.049809 4938 scope.go:117] "RemoveContainer" containerID="1eb2f59a2a71aada26c8a3db3e70803fe5242f58fcf4eaa9402a230bd402cac5" Nov 22 10:45:03 crc kubenswrapper[4938]: E1122 10:45:03.051985 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1eb2f59a2a71aada26c8a3db3e70803fe5242f58fcf4eaa9402a230bd402cac5\": container with ID starting with 1eb2f59a2a71aada26c8a3db3e70803fe5242f58fcf4eaa9402a230bd402cac5 not found: ID does not exist" 
containerID="1eb2f59a2a71aada26c8a3db3e70803fe5242f58fcf4eaa9402a230bd402cac5" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.052015 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1eb2f59a2a71aada26c8a3db3e70803fe5242f58fcf4eaa9402a230bd402cac5"} err="failed to get container status \"1eb2f59a2a71aada26c8a3db3e70803fe5242f58fcf4eaa9402a230bd402cac5\": rpc error: code = NotFound desc = could not find container \"1eb2f59a2a71aada26c8a3db3e70803fe5242f58fcf4eaa9402a230bd402cac5\": container with ID starting with 1eb2f59a2a71aada26c8a3db3e70803fe5242f58fcf4eaa9402a230bd402cac5 not found: ID does not exist" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.052032 4938 scope.go:117] "RemoveContainer" containerID="57977bc8313c73133e1362be24eb0d04f996e2d701c9dcc39e1572e563a84de2" Nov 22 10:45:03 crc kubenswrapper[4938]: E1122 10:45:03.053007 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57977bc8313c73133e1362be24eb0d04f996e2d701c9dcc39e1572e563a84de2\": container with ID starting with 57977bc8313c73133e1362be24eb0d04f996e2d701c9dcc39e1572e563a84de2 not found: ID does not exist" containerID="57977bc8313c73133e1362be24eb0d04f996e2d701c9dcc39e1572e563a84de2" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.053056 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57977bc8313c73133e1362be24eb0d04f996e2d701c9dcc39e1572e563a84de2"} err="failed to get container status \"57977bc8313c73133e1362be24eb0d04f996e2d701c9dcc39e1572e563a84de2\": rpc error: code = NotFound desc = could not find container \"57977bc8313c73133e1362be24eb0d04f996e2d701c9dcc39e1572e563a84de2\": container with ID starting with 57977bc8313c73133e1362be24eb0d04f996e2d701c9dcc39e1572e563a84de2 not found: ID does not exist" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.053094 4938 scope.go:117] "RemoveContainer" containerID="68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.071687 4938 scope.go:117] "RemoveContainer" containerID="68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e" Nov 22 10:45:03 crc kubenswrapper[4938]: E1122 10:45:03.072364 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e\": container with ID starting with 68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e not found: ID does not exist" containerID="68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.072405 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e"} err="failed to get container status \"68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e\": rpc error: code = NotFound desc = could not find container \"68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e\": container with ID starting with 68decc47dfd4c5c26bfe02c10f20b31ba93cd3ea6c1364900d3463c4083b5c5e not found: ID does not exist" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.072435 4938 scope.go:117] "RemoveContainer" containerID="bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 
10:45:03.084828 4938 scope.go:117] "RemoveContainer" containerID="54ce364fdd29b1732ece8015ae95a6449609df7d447921356511884a6c788af9"
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.095256 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58"
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.100611 4938 scope.go:117] "RemoveContainer" containerID="7dfd65bbe0fa6925e5cd72a4d1dfffa304e456fb5544afb12592bf1fe808b0c4"
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.116607 4938 scope.go:117] "RemoveContainer" containerID="bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd"
Nov 22 10:45:03 crc kubenswrapper[4938]: E1122 10:45:03.117332 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd\": container with ID starting with bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd not found: ID does not exist" containerID="bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd"
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.117360 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd"} err="failed to get container status \"bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd\": rpc error: code = NotFound desc = could not find container \"bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd\": container with ID starting with bf5b890f57a5845c60c6abdc9f1908d588945aeb753255df3ba8f6649660a5cd not found: ID does not exist"
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.117381 4938 scope.go:117] "RemoveContainer" containerID="54ce364fdd29b1732ece8015ae95a6449609df7d447921356511884a6c788af9"
Nov 22 10:45:03 crc kubenswrapper[4938]: E1122 10:45:03.117585 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54ce364fdd29b1732ece8015ae95a6449609df7d447921356511884a6c788af9\": container with ID starting with 54ce364fdd29b1732ece8015ae95a6449609df7d447921356511884a6c788af9 not found: ID does not exist" containerID="54ce364fdd29b1732ece8015ae95a6449609df7d447921356511884a6c788af9"
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.117604 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54ce364fdd29b1732ece8015ae95a6449609df7d447921356511884a6c788af9"} err="failed to get container status \"54ce364fdd29b1732ece8015ae95a6449609df7d447921356511884a6c788af9\": rpc error: code = NotFound desc = could not find container \"54ce364fdd29b1732ece8015ae95a6449609df7d447921356511884a6c788af9\": container with ID starting with 54ce364fdd29b1732ece8015ae95a6449609df7d447921356511884a6c788af9 not found: ID does not exist"
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.117616 4938 scope.go:117] "RemoveContainer" containerID="7dfd65bbe0fa6925e5cd72a4d1dfffa304e456fb5544afb12592bf1fe808b0c4"
Nov 22 10:45:03 crc kubenswrapper[4938]: E1122 10:45:03.117803 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7dfd65bbe0fa6925e5cd72a4d1dfffa304e456fb5544afb12592bf1fe808b0c4\": container with ID starting with 7dfd65bbe0fa6925e5cd72a4d1dfffa304e456fb5544afb12592bf1fe808b0c4 not found: ID does not exist" containerID="7dfd65bbe0fa6925e5cd72a4d1dfffa304e456fb5544afb12592bf1fe808b0c4"
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.117819 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7dfd65bbe0fa6925e5cd72a4d1dfffa304e456fb5544afb12592bf1fe808b0c4"} err="failed to get container status \"7dfd65bbe0fa6925e5cd72a4d1dfffa304e456fb5544afb12592bf1fe808b0c4\": rpc error: code = NotFound desc = could not find container \"7dfd65bbe0fa6925e5cd72a4d1dfffa304e456fb5544afb12592bf1fe808b0c4\": container with ID starting with 7dfd65bbe0fa6925e5cd72a4d1dfffa304e456fb5544afb12592bf1fe808b0c4 not found: ID does not exist"
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.117829 4938 scope.go:117] "RemoveContainer" containerID="fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b"
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.136317 4938 scope.go:117] "RemoveContainer" containerID="40a542fc90f39c459b327a5e3ad4e995b73b7c86578d7cefc90a66f97428f5e8"
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.151026 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ebae4252-359f-4d1a-9461-8991ec435c0f-secret-volume\") pod \"ebae4252-359f-4d1a-9461-8991ec435c0f\" (UID: \"ebae4252-359f-4d1a-9461-8991ec435c0f\") "
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.151094 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ebae4252-359f-4d1a-9461-8991ec435c0f-config-volume\") pod \"ebae4252-359f-4d1a-9461-8991ec435c0f\" (UID: \"ebae4252-359f-4d1a-9461-8991ec435c0f\") "
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.151118 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7wxv\" (UniqueName: \"kubernetes.io/projected/ebae4252-359f-4d1a-9461-8991ec435c0f-kube-api-access-m7wxv\") pod \"ebae4252-359f-4d1a-9461-8991ec435c0f\" (UID: \"ebae4252-359f-4d1a-9461-8991ec435c0f\") "
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.152652 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebae4252-359f-4d1a-9461-8991ec435c0f-config-volume" (OuterVolumeSpecName: "config-volume") pod "ebae4252-359f-4d1a-9461-8991ec435c0f" (UID: "ebae4252-359f-4d1a-9461-8991ec435c0f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.156521 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebae4252-359f-4d1a-9461-8991ec435c0f-kube-api-access-m7wxv" (OuterVolumeSpecName: "kube-api-access-m7wxv") pod "ebae4252-359f-4d1a-9461-8991ec435c0f" (UID: "ebae4252-359f-4d1a-9461-8991ec435c0f"). InnerVolumeSpecName "kube-api-access-m7wxv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.160070 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebae4252-359f-4d1a-9461-8991ec435c0f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ebae4252-359f-4d1a-9461-8991ec435c0f" (UID: "ebae4252-359f-4d1a-9461-8991ec435c0f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.168312 4938 scope.go:117] "RemoveContainer" containerID="26f798341fa2c7e7fdb60ec60633ac0e6c8921080247839e0e5536355463ce60" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.187784 4938 scope.go:117] "RemoveContainer" containerID="fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b" Nov 22 10:45:03 crc kubenswrapper[4938]: E1122 10:45:03.188149 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b\": container with ID starting with fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b not found: ID does not exist" containerID="fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.188179 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b"} err="failed to get container status \"fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b\": rpc error: code = NotFound desc = could not find container \"fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b\": container with ID starting with fdf65b5705d549b223d31ae99c733d169078506bb01e64dddece8db89279b78b not found: ID does not exist" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.188201 4938 scope.go:117] "RemoveContainer" containerID="40a542fc90f39c459b327a5e3ad4e995b73b7c86578d7cefc90a66f97428f5e8" Nov 22 10:45:03 crc kubenswrapper[4938]: E1122 10:45:03.188590 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40a542fc90f39c459b327a5e3ad4e995b73b7c86578d7cefc90a66f97428f5e8\": container with ID starting with 40a542fc90f39c459b327a5e3ad4e995b73b7c86578d7cefc90a66f97428f5e8 not found: ID does not exist" containerID="40a542fc90f39c459b327a5e3ad4e995b73b7c86578d7cefc90a66f97428f5e8" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.188613 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40a542fc90f39c459b327a5e3ad4e995b73b7c86578d7cefc90a66f97428f5e8"} err="failed to get container status \"40a542fc90f39c459b327a5e3ad4e995b73b7c86578d7cefc90a66f97428f5e8\": rpc error: code = NotFound desc = could not find container \"40a542fc90f39c459b327a5e3ad4e995b73b7c86578d7cefc90a66f97428f5e8\": container with ID starting with 40a542fc90f39c459b327a5e3ad4e995b73b7c86578d7cefc90a66f97428f5e8 not found: ID does not exist" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.188628 4938 scope.go:117] "RemoveContainer" containerID="26f798341fa2c7e7fdb60ec60633ac0e6c8921080247839e0e5536355463ce60" Nov 22 10:45:03 crc kubenswrapper[4938]: E1122 10:45:03.188879 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26f798341fa2c7e7fdb60ec60633ac0e6c8921080247839e0e5536355463ce60\": container with ID starting with 26f798341fa2c7e7fdb60ec60633ac0e6c8921080247839e0e5536355463ce60 not found: ID does not exist" containerID="26f798341fa2c7e7fdb60ec60633ac0e6c8921080247839e0e5536355463ce60" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.188902 4938 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"26f798341fa2c7e7fdb60ec60633ac0e6c8921080247839e0e5536355463ce60"} err="failed to get container status \"26f798341fa2c7e7fdb60ec60633ac0e6c8921080247839e0e5536355463ce60\": rpc error: code = NotFound desc = could not find container \"26f798341fa2c7e7fdb60ec60633ac0e6c8921080247839e0e5536355463ce60\": container with ID starting with 26f798341fa2c7e7fdb60ec60633ac0e6c8921080247839e0e5536355463ce60 not found: ID does not exist" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.252355 4938 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ebae4252-359f-4d1a-9461-8991ec435c0f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.252389 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7wxv\" (UniqueName: \"kubernetes.io/projected/ebae4252-359f-4d1a-9461-8991ec435c0f-kube-api-access-m7wxv\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.252400 4938 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ebae4252-359f-4d1a-9461-8991ec435c0f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.614738 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-wm7ff"] Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.868342 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.868340 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58" event={"ID":"ebae4252-359f-4d1a-9461-8991ec435c0f","Type":"ContainerDied","Data":"03d5183b7a6053948f903a8dd9c9551a8f7e2750f0fb4a25b64679a6faa9a5a3"} Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.868494 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03d5183b7a6053948f903a8dd9c9551a8f7e2750f0fb4a25b64679a6faa9a5a3" Nov 22 10:45:03 crc kubenswrapper[4938]: I1122 10:45:03.876082 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-hscj2" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042231 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-n7p72"] Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042415 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="311f1063-1ead-4575-adce-cbf298b713b0" containerName="marketplace-operator" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042430 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="311f1063-1ead-4575-adce-cbf298b713b0" containerName="marketplace-operator" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042441 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4381753c-a419-4f63-acc0-6b1bc5529b75" containerName="registry-server" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042447 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="4381753c-a419-4f63-acc0-6b1bc5529b75" containerName="registry-server" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042464 4938 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="372bd14a-9e2d-4320-92e4-534c44542975" containerName="registry-server" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042476 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="372bd14a-9e2d-4320-92e4-534c44542975" containerName="registry-server" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042486 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="372bd14a-9e2d-4320-92e4-534c44542975" containerName="extract-utilities" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042498 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="372bd14a-9e2d-4320-92e4-534c44542975" containerName="extract-utilities" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042506 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5f69615-9ff1-4803-95de-28529afe284f" containerName="extract-content" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042512 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5f69615-9ff1-4803-95de-28529afe284f" containerName="extract-content" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042521 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5f69615-9ff1-4803-95de-28529afe284f" containerName="registry-server" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042527 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5f69615-9ff1-4803-95de-28529afe284f" containerName="registry-server" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042535 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4381753c-a419-4f63-acc0-6b1bc5529b75" containerName="extract-utilities" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042540 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="4381753c-a419-4f63-acc0-6b1bc5529b75" containerName="extract-utilities" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042548 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4381753c-a419-4f63-acc0-6b1bc5529b75" containerName="extract-content" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042554 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="4381753c-a419-4f63-acc0-6b1bc5529b75" containerName="extract-content" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042562 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5f69615-9ff1-4803-95de-28529afe284f" containerName="extract-utilities" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042569 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5f69615-9ff1-4803-95de-28529afe284f" containerName="extract-utilities" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042586 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="372bd14a-9e2d-4320-92e4-534c44542975" containerName="extract-content" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042595 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="372bd14a-9e2d-4320-92e4-534c44542975" containerName="extract-content" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042606 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" containerName="extract-content" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042613 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" containerName="extract-content" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042624 4938 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" containerName="extract-utilities" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042631 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" containerName="extract-utilities" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042640 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" containerName="registry-server" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042647 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" containerName="registry-server" Nov 22 10:45:04 crc kubenswrapper[4938]: E1122 10:45:04.042654 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebae4252-359f-4d1a-9461-8991ec435c0f" containerName="collect-profiles" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042662 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebae4252-359f-4d1a-9461-8991ec435c0f" containerName="collect-profiles" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042755 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebae4252-359f-4d1a-9461-8991ec435c0f" containerName="collect-profiles" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042769 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" containerName="registry-server" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042778 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="4381753c-a419-4f63-acc0-6b1bc5529b75" containerName="registry-server" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042789 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="311f1063-1ead-4575-adce-cbf298b713b0" containerName="marketplace-operator" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042800 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="372bd14a-9e2d-4320-92e4-534c44542975" containerName="registry-server" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.042808 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5f69615-9ff1-4803-95de-28529afe284f" containerName="registry-server" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.043463 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-n7p72" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.045444 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.049603 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n7p72"] Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.162325 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e1c77e8-d7ac-457a-945e-1fffe49dc82e-utilities\") pod \"certified-operators-n7p72\" (UID: \"8e1c77e8-d7ac-457a-945e-1fffe49dc82e\") " pod="openshift-marketplace/certified-operators-n7p72" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.162674 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgw6x\" (UniqueName: \"kubernetes.io/projected/8e1c77e8-d7ac-457a-945e-1fffe49dc82e-kube-api-access-lgw6x\") pod \"certified-operators-n7p72\" (UID: \"8e1c77e8-d7ac-457a-945e-1fffe49dc82e\") " pod="openshift-marketplace/certified-operators-n7p72" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.162731 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e1c77e8-d7ac-457a-945e-1fffe49dc82e-catalog-content\") pod \"certified-operators-n7p72\" (UID: \"8e1c77e8-d7ac-457a-945e-1fffe49dc82e\") " pod="openshift-marketplace/certified-operators-n7p72" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.238494 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7nmjx"] Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.239690 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7nmjx" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.242349 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.248073 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7nmjx"] Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.264058 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e1c77e8-d7ac-457a-945e-1fffe49dc82e-catalog-content\") pod \"certified-operators-n7p72\" (UID: \"8e1c77e8-d7ac-457a-945e-1fffe49dc82e\") " pod="openshift-marketplace/certified-operators-n7p72" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.264112 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e1c77e8-d7ac-457a-945e-1fffe49dc82e-utilities\") pod \"certified-operators-n7p72\" (UID: \"8e1c77e8-d7ac-457a-945e-1fffe49dc82e\") " pod="openshift-marketplace/certified-operators-n7p72" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.264195 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgw6x\" (UniqueName: \"kubernetes.io/projected/8e1c77e8-d7ac-457a-945e-1fffe49dc82e-kube-api-access-lgw6x\") pod \"certified-operators-n7p72\" (UID: \"8e1c77e8-d7ac-457a-945e-1fffe49dc82e\") " pod="openshift-marketplace/certified-operators-n7p72" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.265025 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e1c77e8-d7ac-457a-945e-1fffe49dc82e-utilities\") pod \"certified-operators-n7p72\" (UID: \"8e1c77e8-d7ac-457a-945e-1fffe49dc82e\") " pod="openshift-marketplace/certified-operators-n7p72" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.266192 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e1c77e8-d7ac-457a-945e-1fffe49dc82e-catalog-content\") pod \"certified-operators-n7p72\" (UID: \"8e1c77e8-d7ac-457a-945e-1fffe49dc82e\") " pod="openshift-marketplace/certified-operators-n7p72" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.284843 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgw6x\" (UniqueName: \"kubernetes.io/projected/8e1c77e8-d7ac-457a-945e-1fffe49dc82e-kube-api-access-lgw6x\") pod \"certified-operators-n7p72\" (UID: \"8e1c77e8-d7ac-457a-945e-1fffe49dc82e\") " pod="openshift-marketplace/certified-operators-n7p72" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.365661 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93c1a9c7-8810-4ee4-977f-c18fc37b10ec-catalog-content\") pod \"redhat-marketplace-7nmjx\" (UID: \"93c1a9c7-8810-4ee4-977f-c18fc37b10ec\") " pod="openshift-marketplace/redhat-marketplace-7nmjx" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.365719 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5ps7\" (UniqueName: \"kubernetes.io/projected/93c1a9c7-8810-4ee4-977f-c18fc37b10ec-kube-api-access-z5ps7\") pod \"redhat-marketplace-7nmjx\" (UID: 
\"93c1a9c7-8810-4ee4-977f-c18fc37b10ec\") " pod="openshift-marketplace/redhat-marketplace-7nmjx" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.365780 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93c1a9c7-8810-4ee4-977f-c18fc37b10ec-utilities\") pod \"redhat-marketplace-7nmjx\" (UID: \"93c1a9c7-8810-4ee4-977f-c18fc37b10ec\") " pod="openshift-marketplace/redhat-marketplace-7nmjx" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.389602 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n7p72" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.462944 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="311f1063-1ead-4575-adce-cbf298b713b0" path="/var/lib/kubelet/pods/311f1063-1ead-4575-adce-cbf298b713b0/volumes" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.465608 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="372bd14a-9e2d-4320-92e4-534c44542975" path="/var/lib/kubelet/pods/372bd14a-9e2d-4320-92e4-534c44542975/volumes" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.466273 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4381753c-a419-4f63-acc0-6b1bc5529b75" path="/var/lib/kubelet/pods/4381753c-a419-4f63-acc0-6b1bc5529b75/volumes" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.467254 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5ps7\" (UniqueName: \"kubernetes.io/projected/93c1a9c7-8810-4ee4-977f-c18fc37b10ec-kube-api-access-z5ps7\") pod \"redhat-marketplace-7nmjx\" (UID: \"93c1a9c7-8810-4ee4-977f-c18fc37b10ec\") " pod="openshift-marketplace/redhat-marketplace-7nmjx" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.467367 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93c1a9c7-8810-4ee4-977f-c18fc37b10ec-utilities\") pod \"redhat-marketplace-7nmjx\" (UID: \"93c1a9c7-8810-4ee4-977f-c18fc37b10ec\") " pod="openshift-marketplace/redhat-marketplace-7nmjx" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.467469 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93c1a9c7-8810-4ee4-977f-c18fc37b10ec-catalog-content\") pod \"redhat-marketplace-7nmjx\" (UID: \"93c1a9c7-8810-4ee4-977f-c18fc37b10ec\") " pod="openshift-marketplace/redhat-marketplace-7nmjx" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.467978 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93c1a9c7-8810-4ee4-977f-c18fc37b10ec-utilities\") pod \"redhat-marketplace-7nmjx\" (UID: \"93c1a9c7-8810-4ee4-977f-c18fc37b10ec\") " pod="openshift-marketplace/redhat-marketplace-7nmjx" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.467994 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93c1a9c7-8810-4ee4-977f-c18fc37b10ec-catalog-content\") pod \"redhat-marketplace-7nmjx\" (UID: \"93c1a9c7-8810-4ee4-977f-c18fc37b10ec\") " pod="openshift-marketplace/redhat-marketplace-7nmjx" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.468176 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="80700ea3-9e71-45b9-8896-12c1ed5d4d00" path="/var/lib/kubelet/pods/80700ea3-9e71-45b9-8896-12c1ed5d4d00/volumes" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.469244 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5f69615-9ff1-4803-95de-28529afe284f" path="/var/lib/kubelet/pods/f5f69615-9ff1-4803-95de-28529afe284f/volumes" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.488709 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5ps7\" (UniqueName: \"kubernetes.io/projected/93c1a9c7-8810-4ee4-977f-c18fc37b10ec-kube-api-access-z5ps7\") pod \"redhat-marketplace-7nmjx\" (UID: \"93c1a9c7-8810-4ee4-977f-c18fc37b10ec\") " pod="openshift-marketplace/redhat-marketplace-7nmjx" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.552271 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7nmjx" Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.573585 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n7p72"] Nov 22 10:45:04 crc kubenswrapper[4938]: W1122 10:45:04.578436 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e1c77e8_d7ac_457a_945e_1fffe49dc82e.slice/crio-9972fab71321731f0d768f53fffebbbdbc2150c862282dc904cbef41919afcab WatchSource:0}: Error finding container 9972fab71321731f0d768f53fffebbbdbc2150c862282dc904cbef41919afcab: Status 404 returned error can't find the container with id 9972fab71321731f0d768f53fffebbbdbc2150c862282dc904cbef41919afcab Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.755702 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7nmjx"] Nov 22 10:45:04 crc kubenswrapper[4938]: W1122 10:45:04.806386 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod93c1a9c7_8810_4ee4_977f_c18fc37b10ec.slice/crio-7e97500635ae43fcbc8ca1a38d917535ca27a1650faf2ac8c55d4af4f3ef5d08 WatchSource:0}: Error finding container 7e97500635ae43fcbc8ca1a38d917535ca27a1650faf2ac8c55d4af4f3ef5d08: Status 404 returned error can't find the container with id 7e97500635ae43fcbc8ca1a38d917535ca27a1650faf2ac8c55d4af4f3ef5d08 Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.877109 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7nmjx" event={"ID":"93c1a9c7-8810-4ee4-977f-c18fc37b10ec","Type":"ContainerStarted","Data":"7e97500635ae43fcbc8ca1a38d917535ca27a1650faf2ac8c55d4af4f3ef5d08"} Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.878745 4938 generic.go:334] "Generic (PLEG): container finished" podID="8e1c77e8-d7ac-457a-945e-1fffe49dc82e" containerID="186d13620e6d667c55a160e6e746e7478e5aaf5d9dfd427e5d39a734edfc96f1" exitCode=0 Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.879582 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n7p72" event={"ID":"8e1c77e8-d7ac-457a-945e-1fffe49dc82e","Type":"ContainerDied","Data":"186d13620e6d667c55a160e6e746e7478e5aaf5d9dfd427e5d39a734edfc96f1"} Nov 22 10:45:04 crc kubenswrapper[4938]: I1122 10:45:04.879599 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n7p72" 
event={"ID":"8e1c77e8-d7ac-457a-945e-1fffe49dc82e","Type":"ContainerStarted","Data":"9972fab71321731f0d768f53fffebbbdbc2150c862282dc904cbef41919afcab"} Nov 22 10:45:05 crc kubenswrapper[4938]: I1122 10:45:05.885955 4938 generic.go:334] "Generic (PLEG): container finished" podID="93c1a9c7-8810-4ee4-977f-c18fc37b10ec" containerID="7e44c9938dee03477ecda1ffcdc1ef7e4d5a34a8e19afbac67f20ea423c8a208" exitCode=0 Nov 22 10:45:05 crc kubenswrapper[4938]: I1122 10:45:05.886248 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7nmjx" event={"ID":"93c1a9c7-8810-4ee4-977f-c18fc37b10ec","Type":"ContainerDied","Data":"7e44c9938dee03477ecda1ffcdc1ef7e4d5a34a8e19afbac67f20ea423c8a208"} Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.439965 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qjkpw"] Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.441095 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qjkpw" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.442694 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.465997 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qjkpw"] Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.489060 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6778dbfd-75df-4429-9a78-4ea6225eae52-catalog-content\") pod \"community-operators-qjkpw\" (UID: \"6778dbfd-75df-4429-9a78-4ea6225eae52\") " pod="openshift-marketplace/community-operators-qjkpw" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.489129 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6778dbfd-75df-4429-9a78-4ea6225eae52-utilities\") pod \"community-operators-qjkpw\" (UID: \"6778dbfd-75df-4429-9a78-4ea6225eae52\") " pod="openshift-marketplace/community-operators-qjkpw" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.489189 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwtnz\" (UniqueName: \"kubernetes.io/projected/6778dbfd-75df-4429-9a78-4ea6225eae52-kube-api-access-zwtnz\") pod \"community-operators-qjkpw\" (UID: \"6778dbfd-75df-4429-9a78-4ea6225eae52\") " pod="openshift-marketplace/community-operators-qjkpw" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.590297 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6778dbfd-75df-4429-9a78-4ea6225eae52-catalog-content\") pod \"community-operators-qjkpw\" (UID: \"6778dbfd-75df-4429-9a78-4ea6225eae52\") " pod="openshift-marketplace/community-operators-qjkpw" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.590349 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6778dbfd-75df-4429-9a78-4ea6225eae52-utilities\") pod \"community-operators-qjkpw\" (UID: \"6778dbfd-75df-4429-9a78-4ea6225eae52\") " pod="openshift-marketplace/community-operators-qjkpw" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.590386 4938 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwtnz\" (UniqueName: \"kubernetes.io/projected/6778dbfd-75df-4429-9a78-4ea6225eae52-kube-api-access-zwtnz\") pod \"community-operators-qjkpw\" (UID: \"6778dbfd-75df-4429-9a78-4ea6225eae52\") " pod="openshift-marketplace/community-operators-qjkpw" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.590862 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6778dbfd-75df-4429-9a78-4ea6225eae52-catalog-content\") pod \"community-operators-qjkpw\" (UID: \"6778dbfd-75df-4429-9a78-4ea6225eae52\") " pod="openshift-marketplace/community-operators-qjkpw" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.591115 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6778dbfd-75df-4429-9a78-4ea6225eae52-utilities\") pod \"community-operators-qjkpw\" (UID: \"6778dbfd-75df-4429-9a78-4ea6225eae52\") " pod="openshift-marketplace/community-operators-qjkpw" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.609259 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwtnz\" (UniqueName: \"kubernetes.io/projected/6778dbfd-75df-4429-9a78-4ea6225eae52-kube-api-access-zwtnz\") pod \"community-operators-qjkpw\" (UID: \"6778dbfd-75df-4429-9a78-4ea6225eae52\") " pod="openshift-marketplace/community-operators-qjkpw" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.639460 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lxs6w"] Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.641116 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lxs6w" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.648996 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.652034 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lxs6w"] Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.691082 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpd5h\" (UniqueName: \"kubernetes.io/projected/f4406526-10df-413c-87df-4aa065d6ecfb-kube-api-access-kpd5h\") pod \"redhat-operators-lxs6w\" (UID: \"f4406526-10df-413c-87df-4aa065d6ecfb\") " pod="openshift-marketplace/redhat-operators-lxs6w" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.691130 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4406526-10df-413c-87df-4aa065d6ecfb-catalog-content\") pod \"redhat-operators-lxs6w\" (UID: \"f4406526-10df-413c-87df-4aa065d6ecfb\") " pod="openshift-marketplace/redhat-operators-lxs6w" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.691152 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4406526-10df-413c-87df-4aa065d6ecfb-utilities\") pod \"redhat-operators-lxs6w\" (UID: \"f4406526-10df-413c-87df-4aa065d6ecfb\") " pod="openshift-marketplace/redhat-operators-lxs6w" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.792167 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4406526-10df-413c-87df-4aa065d6ecfb-utilities\") pod \"redhat-operators-lxs6w\" (UID: \"f4406526-10df-413c-87df-4aa065d6ecfb\") " pod="openshift-marketplace/redhat-operators-lxs6w" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.792268 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpd5h\" (UniqueName: \"kubernetes.io/projected/f4406526-10df-413c-87df-4aa065d6ecfb-kube-api-access-kpd5h\") pod \"redhat-operators-lxs6w\" (UID: \"f4406526-10df-413c-87df-4aa065d6ecfb\") " pod="openshift-marketplace/redhat-operators-lxs6w" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.792292 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4406526-10df-413c-87df-4aa065d6ecfb-catalog-content\") pod \"redhat-operators-lxs6w\" (UID: \"f4406526-10df-413c-87df-4aa065d6ecfb\") " pod="openshift-marketplace/redhat-operators-lxs6w" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.792660 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4406526-10df-413c-87df-4aa065d6ecfb-utilities\") pod \"redhat-operators-lxs6w\" (UID: \"f4406526-10df-413c-87df-4aa065d6ecfb\") " pod="openshift-marketplace/redhat-operators-lxs6w" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.792713 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4406526-10df-413c-87df-4aa065d6ecfb-catalog-content\") pod \"redhat-operators-lxs6w\" (UID: \"f4406526-10df-413c-87df-4aa065d6ecfb\") " 
pod="openshift-marketplace/redhat-operators-lxs6w" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.796147 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qjkpw" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.809039 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpd5h\" (UniqueName: \"kubernetes.io/projected/f4406526-10df-413c-87df-4aa065d6ecfb-kube-api-access-kpd5h\") pod \"redhat-operators-lxs6w\" (UID: \"f4406526-10df-413c-87df-4aa065d6ecfb\") " pod="openshift-marketplace/redhat-operators-lxs6w" Nov 22 10:45:06 crc kubenswrapper[4938]: I1122 10:45:06.962577 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lxs6w" Nov 22 10:45:07 crc kubenswrapper[4938]: I1122 10:45:07.765827 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lxs6w"] Nov 22 10:45:07 crc kubenswrapper[4938]: W1122 10:45:07.791226 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4406526_10df_413c_87df_4aa065d6ecfb.slice/crio-a6eb3ffe60b28c81f8cf220cd9f69b479fa8eb7662328802eebf51139f380b42 WatchSource:0}: Error finding container a6eb3ffe60b28c81f8cf220cd9f69b479fa8eb7662328802eebf51139f380b42: Status 404 returned error can't find the container with id a6eb3ffe60b28c81f8cf220cd9f69b479fa8eb7662328802eebf51139f380b42 Nov 22 10:45:07 crc kubenswrapper[4938]: I1122 10:45:07.900371 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxs6w" event={"ID":"f4406526-10df-413c-87df-4aa065d6ecfb","Type":"ContainerStarted","Data":"a6eb3ffe60b28c81f8cf220cd9f69b479fa8eb7662328802eebf51139f380b42"} Nov 22 10:45:07 crc kubenswrapper[4938]: I1122 10:45:07.902276 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n7p72" event={"ID":"8e1c77e8-d7ac-457a-945e-1fffe49dc82e","Type":"ContainerStarted","Data":"3beee41ff8528c6a26a822a69092aa210d4abd57d99696ea392525f119eb0f34"} Nov 22 10:45:07 crc kubenswrapper[4938]: I1122 10:45:07.991315 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qjkpw"] Nov 22 10:45:07 crc kubenswrapper[4938]: W1122 10:45:07.998369 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6778dbfd_75df_4429_9a78_4ea6225eae52.slice/crio-9d25b3d5f3a0e5c17b9525e36ff89d66423cc455461aca39d487bf26f7bf97e0 WatchSource:0}: Error finding container 9d25b3d5f3a0e5c17b9525e36ff89d66423cc455461aca39d487bf26f7bf97e0: Status 404 returned error can't find the container with id 9d25b3d5f3a0e5c17b9525e36ff89d66423cc455461aca39d487bf26f7bf97e0 Nov 22 10:45:08 crc kubenswrapper[4938]: I1122 10:45:08.909062 4938 generic.go:334] "Generic (PLEG): container finished" podID="6778dbfd-75df-4429-9a78-4ea6225eae52" containerID="5435eeb99316445ad7c4d8634559bc21f5d77d4e1c1a9c020356ebd526082ab7" exitCode=0 Nov 22 10:45:08 crc kubenswrapper[4938]: I1122 10:45:08.909176 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjkpw" event={"ID":"6778dbfd-75df-4429-9a78-4ea6225eae52","Type":"ContainerDied","Data":"5435eeb99316445ad7c4d8634559bc21f5d77d4e1c1a9c020356ebd526082ab7"} Nov 22 10:45:08 crc kubenswrapper[4938]: I1122 10:45:08.909706 4938 
Nov 22 10:45:08 crc kubenswrapper[4938]: I1122 10:45:08.913906 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7nmjx" event={"ID":"93c1a9c7-8810-4ee4-977f-c18fc37b10ec","Type":"ContainerStarted","Data":"fc523e589032d330d285edfcebdeb44697af76ba5eba3f9de4188a7a749eeba1"}
Nov 22 10:45:08 crc kubenswrapper[4938]: I1122 10:45:08.917026 4938 generic.go:334] "Generic (PLEG): container finished" podID="f4406526-10df-413c-87df-4aa065d6ecfb" containerID="93f8ff0ec081c720f48414029ab61852921765542661fdce100adca5308bd1f8" exitCode=0
Nov 22 10:45:08 crc kubenswrapper[4938]: I1122 10:45:08.917108 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxs6w" event={"ID":"f4406526-10df-413c-87df-4aa065d6ecfb","Type":"ContainerDied","Data":"93f8ff0ec081c720f48414029ab61852921765542661fdce100adca5308bd1f8"}
Nov 22 10:45:08 crc kubenswrapper[4938]: I1122 10:45:08.921617 4938 generic.go:334] "Generic (PLEG): container finished" podID="8e1c77e8-d7ac-457a-945e-1fffe49dc82e" containerID="3beee41ff8528c6a26a822a69092aa210d4abd57d99696ea392525f119eb0f34" exitCode=0
Nov 22 10:45:08 crc kubenswrapper[4938]: I1122 10:45:08.921657 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n7p72" event={"ID":"8e1c77e8-d7ac-457a-945e-1fffe49dc82e","Type":"ContainerDied","Data":"3beee41ff8528c6a26a822a69092aa210d4abd57d99696ea392525f119eb0f34"}
Nov 22 10:45:09 crc kubenswrapper[4938]: I1122 10:45:09.928454 4938 generic.go:334] "Generic (PLEG): container finished" podID="93c1a9c7-8810-4ee4-977f-c18fc37b10ec" containerID="fc523e589032d330d285edfcebdeb44697af76ba5eba3f9de4188a7a749eeba1" exitCode=0
Nov 22 10:45:09 crc kubenswrapper[4938]: I1122 10:45:09.928536 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7nmjx" event={"ID":"93c1a9c7-8810-4ee4-977f-c18fc37b10ec","Type":"ContainerDied","Data":"fc523e589032d330d285edfcebdeb44697af76ba5eba3f9de4188a7a749eeba1"}
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.300720 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.301306 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.301356 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc"
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.301895 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"630364ea11dce6112bc5c154dd1935ce1f62e93a130b99402687bd95f412446b"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.301991 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://630364ea11dce6112bc5c154dd1935ce1f62e93a130b99402687bd95f412446b" gracePeriod=600
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.952976 4938 generic.go:334] "Generic (PLEG): container finished" podID="6778dbfd-75df-4429-9a78-4ea6225eae52" containerID="80db2b257139f8e4c90a817f0aea55d89c0503876cc528a5c8ceeec7728347ea" exitCode=0
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.953028 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjkpw" event={"ID":"6778dbfd-75df-4429-9a78-4ea6225eae52","Type":"ContainerDied","Data":"80db2b257139f8e4c90a817f0aea55d89c0503876cc528a5c8ceeec7728347ea"}
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.955309 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7nmjx" event={"ID":"93c1a9c7-8810-4ee4-977f-c18fc37b10ec","Type":"ContainerStarted","Data":"034bb558c4e10234a9009ca847363be9b163fb522cc05c15517eb9897319fd39"}
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.957183 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxs6w" event={"ID":"f4406526-10df-413c-87df-4aa065d6ecfb","Type":"ContainerStarted","Data":"472c1671198f9e07b6137fc01fe3f6ecb8fa117b5ffd88f41ab89dbf0781f3b0"}
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.962686 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n7p72" event={"ID":"8e1c77e8-d7ac-457a-945e-1fffe49dc82e","Type":"ContainerStarted","Data":"2c69d2a659bd52ce3821717ab62e876df6feed0e32b683c81640c39b803ade6c"}
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.967529 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="630364ea11dce6112bc5c154dd1935ce1f62e93a130b99402687bd95f412446b" exitCode=0
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.967562 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"630364ea11dce6112bc5c154dd1935ce1f62e93a130b99402687bd95f412446b"}
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.967579 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"9d04d2bb9c07a4ef07461763ab344623cc95c9776ecc97046a4862e181d472ec"}
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.967596 4938 scope.go:117] "RemoveContainer" containerID="e2ae1ed74c169e31396bb96e1ca0707b522e243c7e5ffcfa6009578ceb82d07f"
Nov 22 10:45:11 crc kubenswrapper[4938]: I1122 10:45:11.993036 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7nmjx" podStartSLOduration=2.819327742 podStartE2EDuration="7.993019392s" podCreationTimestamp="2025-11-22 10:45:04 +0000 UTC" firstStartedPulling="2025-11-22 10:45:05.887956077 +0000 UTC m=+438.355793476" lastFinishedPulling="2025-11-22 10:45:11.061647727 +0000 UTC m=+443.529485126" observedRunningTime="2025-11-22 10:45:11.990707431 +0000 UTC m=+444.458544830" watchObservedRunningTime="2025-11-22 10:45:11.993019392 +0000 UTC m=+444.460856791"
Nov 22 10:45:12 crc kubenswrapper[4938]: I1122 10:45:12.053651 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-n7p72" podStartSLOduration=2.657925511 podStartE2EDuration="8.053632708s" podCreationTimestamp="2025-11-22 10:45:04 +0000 UTC" firstStartedPulling="2025-11-22 10:45:04.880323614 +0000 UTC m=+437.348161013" lastFinishedPulling="2025-11-22 10:45:10.276030811 +0000 UTC m=+442.743868210" observedRunningTime="2025-11-22 10:45:12.050970788 +0000 UTC m=+444.518808187" watchObservedRunningTime="2025-11-22 10:45:12.053632708 +0000 UTC m=+444.521470107"
Nov 22 10:45:12 crc kubenswrapper[4938]: I1122 10:45:12.975430 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjkpw" event={"ID":"6778dbfd-75df-4429-9a78-4ea6225eae52","Type":"ContainerStarted","Data":"48d74f840a38ee1292dd82e640c56c950282280730e0f955ab037c59e365778e"}
Nov 22 10:45:12 crc kubenswrapper[4938]: I1122 10:45:12.977155 4938 generic.go:334] "Generic (PLEG): container finished" podID="f4406526-10df-413c-87df-4aa065d6ecfb" containerID="472c1671198f9e07b6137fc01fe3f6ecb8fa117b5ffd88f41ab89dbf0781f3b0" exitCode=0
Nov 22 10:45:12 crc kubenswrapper[4938]: I1122 10:45:12.978151 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxs6w" event={"ID":"f4406526-10df-413c-87df-4aa065d6ecfb","Type":"ContainerDied","Data":"472c1671198f9e07b6137fc01fe3f6ecb8fa117b5ffd88f41ab89dbf0781f3b0"}
Nov 22 10:45:13 crc kubenswrapper[4938]: I1122 10:45:13.984004 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxs6w" event={"ID":"f4406526-10df-413c-87df-4aa065d6ecfb","Type":"ContainerStarted","Data":"fcfc9a40e796ae1d97b8b5cd6d825fc51cef5ac9b0535373318d290e3d7ca4d6"}
Nov 22 10:45:14 crc kubenswrapper[4938]: I1122 10:45:14.001153 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qjkpw" podStartSLOduration=4.264783284 podStartE2EDuration="8.001137189s" podCreationTimestamp="2025-11-22 10:45:06 +0000 UTC" firstStartedPulling="2025-11-22 10:45:08.912351803 +0000 UTC m=+441.380189202" lastFinishedPulling="2025-11-22 10:45:12.648705708 +0000 UTC m=+445.116543107" observedRunningTime="2025-11-22 10:45:13.998341206 +0000 UTC m=+446.466178605" watchObservedRunningTime="2025-11-22 10:45:14.001137189 +0000 UTC m=+446.468974588"
Nov 22 10:45:14 crc kubenswrapper[4938]: I1122 10:45:14.390464 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-n7p72"
Nov 22 10:45:14 crc kubenswrapper[4938]: I1122 10:45:14.390885 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-n7p72"
Nov 22 10:45:14 crc kubenswrapper[4938]: I1122 10:45:14.453508 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-n7p72"
Nov 22 10:45:14 crc kubenswrapper[4938]: I1122 10:45:14.554107 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7nmjx"
Nov 22 10:45:14 crc kubenswrapper[4938]: I1122 10:45:14.554156 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7nmjx"
Nov 22 10:45:14 crc kubenswrapper[4938]: I1122 10:45:14.591943 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7nmjx"
Nov 22 10:45:16 crc kubenswrapper[4938]: I1122 10:45:16.797346 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qjkpw"
Nov 22 10:45:16 crc kubenswrapper[4938]: I1122 10:45:16.798091 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qjkpw"
Nov 22 10:45:16 crc kubenswrapper[4938]: I1122 10:45:16.862661 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qjkpw"
Nov 22 10:45:16 crc kubenswrapper[4938]: I1122 10:45:16.882033 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lxs6w" podStartSLOduration=6.022867727 podStartE2EDuration="10.882014937s" podCreationTimestamp="2025-11-22 10:45:06 +0000 UTC" firstStartedPulling="2025-11-22 10:45:08.919301656 +0000 UTC m=+441.387139055" lastFinishedPulling="2025-11-22 10:45:13.778448846 +0000 UTC m=+446.246286265" observedRunningTime="2025-11-22 10:45:15.018068326 +0000 UTC m=+447.485905745" watchObservedRunningTime="2025-11-22 10:45:16.882014937 +0000 UTC m=+449.349852336"
Nov 22 10:45:16 crc kubenswrapper[4938]: I1122 10:45:16.962777 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lxs6w"
Nov 22 10:45:16 crc kubenswrapper[4938]: I1122 10:45:16.962836 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lxs6w"
Nov 22 10:45:17 crc kubenswrapper[4938]: I1122 10:45:17.048617 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qjkpw"
Nov 22 10:45:18 crc kubenswrapper[4938]: I1122 10:45:18.001469 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lxs6w" podUID="f4406526-10df-413c-87df-4aa065d6ecfb" containerName="registry-server" probeResult="failure" output=<
Nov 22 10:45:18 crc kubenswrapper[4938]: timeout: failed to connect service ":50051" within 1s
Nov 22 10:45:18 crc kubenswrapper[4938]: >
Nov 22 10:45:24 crc kubenswrapper[4938]: I1122 10:45:24.430575 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-n7p72"
Nov 22 10:45:24 crc kubenswrapper[4938]: I1122 10:45:24.592194 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7nmjx"
Nov 22 10:45:26 crc kubenswrapper[4938]: I1122 10:45:26.997187 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lxs6w"
Nov 22 10:45:27 crc kubenswrapper[4938]: I1122 10:45:27.043395 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lxs6w"
Nov 22 10:45:28 crc kubenswrapper[4938]: I1122 10:45:28.645114 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" podUID="d9b35c60-eb15-4473-9098-b44308dd3926" containerName="oauth-openshift" containerID="cri-o://61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a" gracePeriod=15
containerName="oauth-openshift" containerID="cri-o://61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a" gracePeriod=15 Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.009611 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.036737 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-fd9565bb5-4v9bp"] Nov 22 10:45:29 crc kubenswrapper[4938]: E1122 10:45:29.036953 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9b35c60-eb15-4473-9098-b44308dd3926" containerName="oauth-openshift" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.036964 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9b35c60-eb15-4473-9098-b44308dd3926" containerName="oauth-openshift" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.037056 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9b35c60-eb15-4473-9098-b44308dd3926" containerName="oauth-openshift" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.037387 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.059967 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-fd9565bb5-4v9bp"] Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.065237 4938 generic.go:334] "Generic (PLEG): container finished" podID="d9b35c60-eb15-4473-9098-b44308dd3926" containerID="61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a" exitCode=0 Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.065274 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.065280 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" event={"ID":"d9b35c60-eb15-4473-9098-b44308dd3926","Type":"ContainerDied","Data":"61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a"} Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.065308 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-wm7ff" event={"ID":"d9b35c60-eb15-4473-9098-b44308dd3926","Type":"ContainerDied","Data":"835df655d776a0e01e1a3406db8fdec8db5869184c8e270fec02b15a936ac709"} Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.065334 4938 scope.go:117] "RemoveContainer" containerID="61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.085891 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-ocp-branding-template\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.087485 4938 scope.go:117] "RemoveContainer" containerID="61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.087979 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6q8c\" (UniqueName: \"kubernetes.io/projected/d9b35c60-eb15-4473-9098-b44308dd3926-kube-api-access-d6q8c\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088038 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-cliconfig\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088068 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-idp-0-file-data\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088091 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-serving-cert\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088114 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-error\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088157 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-trusted-ca-bundle\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088187 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-service-ca\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088213 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-provider-selection\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088245 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-session\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088282 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-audit-policies\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088309 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d9b35c60-eb15-4473-9098-b44308dd3926-audit-dir\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088336 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-login\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088383 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-router-certs\") pod \"d9b35c60-eb15-4473-9098-b44308dd3926\" (UID: \"d9b35c60-eb15-4473-9098-b44308dd3926\") " Nov 22 10:45:29 crc kubenswrapper[4938]: E1122 10:45:29.088599 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a\": container with ID starting with 61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a not found: ID does not exist" containerID="61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.088626 4938 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a"} err="failed to get container status \"61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a\": rpc error: code = NotFound desc = could not find container \"61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a\": container with ID starting with 61db2160707ed1c743fbaba2a4e2777bdd15178bad2f56531285e3dfd8bc503a not found: ID does not exist" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.089227 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.089295 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d9b35c60-eb15-4473-9098-b44308dd3926-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.089472 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.089800 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090088 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090129 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090173 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-user-template-error\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090230 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-user-template-login\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090262 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-session\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090292 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b14ca097-9675-4c78-8aa1-c81df43f8df2-audit-policies\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090315 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090337 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b14ca097-9675-4c78-8aa1-c81df43f8df2-audit-dir\") pod \"oauth-openshift-fd9565bb5-4v9bp\" 
(UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090359 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090439 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090467 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090500 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-router-certs\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090522 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fsk8\" (UniqueName: \"kubernetes.io/projected/b14ca097-9675-4c78-8aa1-c81df43f8df2-kube-api-access-7fsk8\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090544 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-service-ca\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090598 4938 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090612 4938 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d9b35c60-eb15-4473-9098-b44308dd3926-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090623 4938 reconciler_common.go:293] "Volume detached for volume 
\"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.090646 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.091303 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.093131 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9b35c60-eb15-4473-9098-b44308dd3926-kube-api-access-d6q8c" (OuterVolumeSpecName: "kube-api-access-d6q8c") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "kube-api-access-d6q8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.093341 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.093507 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.094201 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.101466 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.101567 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.101771 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.119161 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.127367 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "d9b35c60-eb15-4473-9098-b44308dd3926" (UID: "d9b35c60-eb15-4473-9098-b44308dd3926"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191614 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-user-template-login\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191657 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-session\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191681 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b14ca097-9675-4c78-8aa1-c81df43f8df2-audit-policies\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191700 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191715 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b14ca097-9675-4c78-8aa1-c81df43f8df2-audit-dir\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191731 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191769 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191786 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 
10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191806 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-router-certs\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191823 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fsk8\" (UniqueName: \"kubernetes.io/projected/b14ca097-9675-4c78-8aa1-c81df43f8df2-kube-api-access-7fsk8\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191838 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-service-ca\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191858 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191875 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191897 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-user-template-error\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191951 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191962 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191972 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6q8c\" (UniqueName: \"kubernetes.io/projected/d9b35c60-eb15-4473-9098-b44308dd3926-kube-api-access-d6q8c\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc 
kubenswrapper[4938]: I1122 10:45:29.191982 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.191991 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.192002 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.192011 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.192022 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.192031 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.192041 4938 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d9b35c60-eb15-4473-9098-b44308dd3926-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.193206 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b14ca097-9675-4c78-8aa1-c81df43f8df2-audit-dir\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.193449 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.193974 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b14ca097-9675-4c78-8aa1-c81df43f8df2-audit-policies\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.194550 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-service-ca\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.194614 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.194754 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-user-template-error\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.195868 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.195950 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-session\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.196082 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.197301 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.197848 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.198349 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-system-router-certs\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.198653 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b14ca097-9675-4c78-8aa1-c81df43f8df2-v4-0-config-user-template-login\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.209211 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fsk8\" (UniqueName: \"kubernetes.io/projected/b14ca097-9675-4c78-8aa1-c81df43f8df2-kube-api-access-7fsk8\") pod \"oauth-openshift-fd9565bb5-4v9bp\" (UID: \"b14ca097-9675-4c78-8aa1-c81df43f8df2\") " pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.364391 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.392901 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-wm7ff"] Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.396574 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-wm7ff"] Nov 22 10:45:29 crc kubenswrapper[4938]: I1122 10:45:29.607831 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-fd9565bb5-4v9bp"] Nov 22 10:45:29 crc kubenswrapper[4938]: W1122 10:45:29.611596 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb14ca097_9675_4c78_8aa1_c81df43f8df2.slice/crio-ce7d20af6157c5cfaac477eff6d13373658ae450e0d8b1d458d0c440ebe031a8 WatchSource:0}: Error finding container ce7d20af6157c5cfaac477eff6d13373658ae450e0d8b1d458d0c440ebe031a8: Status 404 returned error can't find the container with id ce7d20af6157c5cfaac477eff6d13373658ae450e0d8b1d458d0c440ebe031a8 Nov 22 10:45:30 crc kubenswrapper[4938]: I1122 10:45:30.072294 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" event={"ID":"b14ca097-9675-4c78-8aa1-c81df43f8df2","Type":"ContainerStarted","Data":"ea6f216aeba3bcf521d0b302e716c2d8cf345bfdae4c52a9e78455741d5952a5"} Nov 22 10:45:30 crc kubenswrapper[4938]: I1122 10:45:30.072587 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" event={"ID":"b14ca097-9675-4c78-8aa1-c81df43f8df2","Type":"ContainerStarted","Data":"ce7d20af6157c5cfaac477eff6d13373658ae450e0d8b1d458d0c440ebe031a8"} Nov 22 10:45:30 crc kubenswrapper[4938]: I1122 10:45:30.072602 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:30 crc kubenswrapper[4938]: I1122 10:45:30.082334 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" Nov 22 10:45:30 crc kubenswrapper[4938]: I1122 10:45:30.091035 4938 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-authentication/oauth-openshift-fd9565bb5-4v9bp" podStartSLOduration=27.09101473 podStartE2EDuration="27.09101473s" podCreationTimestamp="2025-11-22 10:45:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:45:30.089018659 +0000 UTC m=+462.556856078" watchObservedRunningTime="2025-11-22 10:45:30.09101473 +0000 UTC m=+462.558852139" Nov 22 10:45:30 crc kubenswrapper[4938]: I1122 10:45:30.454252 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9b35c60-eb15-4473-9098-b44308dd3926" path="/var/lib/kubelet/pods/d9b35c60-eb15-4473-9098-b44308dd3926/volumes" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.706837 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-w5mbg"] Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.709276 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.729384 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-w5mbg"] Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.782731 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.783080 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/f556a893-a4e7-46ef-a367-49b2e7fa7a28-registry-certificates\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.783214 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/f556a893-a4e7-46ef-a367-49b2e7fa7a28-installation-pull-secrets\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.783407 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/f556a893-a4e7-46ef-a367-49b2e7fa7a28-registry-tls\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.783554 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsqqb\" (UniqueName: \"kubernetes.io/projected/f556a893-a4e7-46ef-a367-49b2e7fa7a28-kube-api-access-qsqqb\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 
10:46:16.783681 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f556a893-a4e7-46ef-a367-49b2e7fa7a28-bound-sa-token\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.783801 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f556a893-a4e7-46ef-a367-49b2e7fa7a28-trusted-ca\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.783935 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/f556a893-a4e7-46ef-a367-49b2e7fa7a28-ca-trust-extracted\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.856063 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.884733 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/f556a893-a4e7-46ef-a367-49b2e7fa7a28-installation-pull-secrets\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.884787 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/f556a893-a4e7-46ef-a367-49b2e7fa7a28-registry-tls\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.884818 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsqqb\" (UniqueName: \"kubernetes.io/projected/f556a893-a4e7-46ef-a367-49b2e7fa7a28-kube-api-access-qsqqb\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.884844 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f556a893-a4e7-46ef-a367-49b2e7fa7a28-bound-sa-token\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.884860 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/f556a893-a4e7-46ef-a367-49b2e7fa7a28-trusted-ca\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.884878 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/f556a893-a4e7-46ef-a367-49b2e7fa7a28-ca-trust-extracted\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.884934 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/f556a893-a4e7-46ef-a367-49b2e7fa7a28-registry-certificates\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.885844 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/f556a893-a4e7-46ef-a367-49b2e7fa7a28-ca-trust-extracted\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.886755 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f556a893-a4e7-46ef-a367-49b2e7fa7a28-trusted-ca\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.886842 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/f556a893-a4e7-46ef-a367-49b2e7fa7a28-registry-certificates\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.899628 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/f556a893-a4e7-46ef-a367-49b2e7fa7a28-installation-pull-secrets\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.899752 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/f556a893-a4e7-46ef-a367-49b2e7fa7a28-registry-tls\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: I1122 10:46:16.902744 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f556a893-a4e7-46ef-a367-49b2e7fa7a28-bound-sa-token\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:16 crc kubenswrapper[4938]: 
I1122 10:46:16.903417 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsqqb\" (UniqueName: \"kubernetes.io/projected/f556a893-a4e7-46ef-a367-49b2e7fa7a28-kube-api-access-qsqqb\") pod \"image-registry-66df7c8f76-w5mbg\" (UID: \"f556a893-a4e7-46ef-a367-49b2e7fa7a28\") " pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:17 crc kubenswrapper[4938]: I1122 10:46:17.028305 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:17 crc kubenswrapper[4938]: I1122 10:46:17.297034 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-w5mbg"] Nov 22 10:46:17 crc kubenswrapper[4938]: I1122 10:46:17.341922 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" event={"ID":"f556a893-a4e7-46ef-a367-49b2e7fa7a28","Type":"ContainerStarted","Data":"23f76234ebc56b3a217eaf141e3b24dfa094257b685ffeb22e0c1ba659e6bf59"} Nov 22 10:46:18 crc kubenswrapper[4938]: I1122 10:46:18.351069 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" event={"ID":"f556a893-a4e7-46ef-a367-49b2e7fa7a28","Type":"ContainerStarted","Data":"24c711fbc306d60d2f29a6b653c3359b5fc44122a0f51b4b0a0b04ddc5b4a6ca"} Nov 22 10:46:18 crc kubenswrapper[4938]: I1122 10:46:18.351469 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:18 crc kubenswrapper[4938]: I1122 10:46:18.372339 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" podStartSLOduration=2.372324122 podStartE2EDuration="2.372324122s" podCreationTimestamp="2025-11-22 10:46:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:46:18.370748822 +0000 UTC m=+510.838586251" watchObservedRunningTime="2025-11-22 10:46:18.372324122 +0000 UTC m=+510.840161521" Nov 22 10:46:37 crc kubenswrapper[4938]: I1122 10:46:37.032570 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-w5mbg" Nov 22 10:46:37 crc kubenswrapper[4938]: I1122 10:46:37.093157 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zr5wf"] Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.134844 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" podUID="7cf5d758-b959-49f9-8e98-6f84ef428081" containerName="registry" containerID="cri-o://da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07" gracePeriod=30 Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.453699 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.584344 4938 generic.go:334] "Generic (PLEG): container finished" podID="7cf5d758-b959-49f9-8e98-6f84ef428081" containerID="da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07" exitCode=0 Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.584392 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" event={"ID":"7cf5d758-b959-49f9-8e98-6f84ef428081","Type":"ContainerDied","Data":"da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07"} Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.584450 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" event={"ID":"7cf5d758-b959-49f9-8e98-6f84ef428081","Type":"ContainerDied","Data":"bde089d98aa6605fcc48fc9a5e381dba24adf9050b4f026ab1f46cc50a973fd2"} Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.584417 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-zr5wf" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.584468 4938 scope.go:117] "RemoveContainer" containerID="da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.600231 4938 scope.go:117] "RemoveContainer" containerID="da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07" Nov 22 10:47:02 crc kubenswrapper[4938]: E1122 10:47:02.600615 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07\": container with ID starting with da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07 not found: ID does not exist" containerID="da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.600659 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07"} err="failed to get container status \"da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07\": rpc error: code = NotFound desc = could not find container \"da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07\": container with ID starting with da3ef2a9b9058bff7b7c227fbf231deb9dcb9719f78bf07676ade06cc5973e07 not found: ID does not exist" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.627012 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-registry-tls\") pod \"7cf5d758-b959-49f9-8e98-6f84ef428081\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.627087 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7cf5d758-b959-49f9-8e98-6f84ef428081-installation-pull-secrets\") pod \"7cf5d758-b959-49f9-8e98-6f84ef428081\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.627143 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-bound-sa-token\") pod \"7cf5d758-b959-49f9-8e98-6f84ef428081\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.627196 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/7cf5d758-b959-49f9-8e98-6f84ef428081-registry-certificates\") pod \"7cf5d758-b959-49f9-8e98-6f84ef428081\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.627219 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7cf5d758-b959-49f9-8e98-6f84ef428081-ca-trust-extracted\") pod \"7cf5d758-b959-49f9-8e98-6f84ef428081\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.627242 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rt9k4\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-kube-api-access-rt9k4\") pod \"7cf5d758-b959-49f9-8e98-6f84ef428081\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.627265 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cf5d758-b959-49f9-8e98-6f84ef428081-trusted-ca\") pod \"7cf5d758-b959-49f9-8e98-6f84ef428081\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.627482 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"7cf5d758-b959-49f9-8e98-6f84ef428081\" (UID: \"7cf5d758-b959-49f9-8e98-6f84ef428081\") " Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.628714 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7cf5d758-b959-49f9-8e98-6f84ef428081-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "7cf5d758-b959-49f9-8e98-6f84ef428081" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.629832 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7cf5d758-b959-49f9-8e98-6f84ef428081-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "7cf5d758-b959-49f9-8e98-6f84ef428081" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.638010 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "7cf5d758-b959-49f9-8e98-6f84ef428081" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.638380 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-kube-api-access-rt9k4" (OuterVolumeSpecName: "kube-api-access-rt9k4") pod "7cf5d758-b959-49f9-8e98-6f84ef428081" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081"). InnerVolumeSpecName "kube-api-access-rt9k4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.638870 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cf5d758-b959-49f9-8e98-6f84ef428081-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "7cf5d758-b959-49f9-8e98-6f84ef428081" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.639107 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "7cf5d758-b959-49f9-8e98-6f84ef428081" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.650593 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "7cf5d758-b959-49f9-8e98-6f84ef428081" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.656091 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cf5d758-b959-49f9-8e98-6f84ef428081-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "7cf5d758-b959-49f9-8e98-6f84ef428081" (UID: "7cf5d758-b959-49f9-8e98-6f84ef428081"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.728332 4938 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/7cf5d758-b959-49f9-8e98-6f84ef428081-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.728582 4938 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7cf5d758-b959-49f9-8e98-6f84ef428081-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.728652 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rt9k4\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-kube-api-access-rt9k4\") on node \"crc\" DevicePath \"\"" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.728705 4938 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cf5d758-b959-49f9-8e98-6f84ef428081-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.728752 4938 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.728799 4938 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7cf5d758-b959-49f9-8e98-6f84ef428081-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.728850 4938 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7cf5d758-b959-49f9-8e98-6f84ef428081-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.908803 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zr5wf"] Nov 22 10:47:02 crc kubenswrapper[4938]: I1122 10:47:02.916743 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-zr5wf"] Nov 22 10:47:04 crc kubenswrapper[4938]: I1122 10:47:04.454418 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cf5d758-b959-49f9-8e98-6f84ef428081" path="/var/lib/kubelet/pods/7cf5d758-b959-49f9-8e98-6f84ef428081/volumes" Nov 22 10:47:11 crc kubenswrapper[4938]: I1122 10:47:11.301306 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:47:11 crc kubenswrapper[4938]: I1122 10:47:11.301404 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:47:41 crc kubenswrapper[4938]: I1122 10:47:41.300703 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness 
probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:47:41 crc kubenswrapper[4938]: I1122 10:47:41.301158 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:48:11 crc kubenswrapper[4938]: I1122 10:48:11.300987 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:48:11 crc kubenswrapper[4938]: I1122 10:48:11.301499 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:48:11 crc kubenswrapper[4938]: I1122 10:48:11.301545 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:48:11 crc kubenswrapper[4938]: I1122 10:48:11.302144 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9d04d2bb9c07a4ef07461763ab344623cc95c9776ecc97046a4862e181d472ec"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 10:48:11 crc kubenswrapper[4938]: I1122 10:48:11.302203 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://9d04d2bb9c07a4ef07461763ab344623cc95c9776ecc97046a4862e181d472ec" gracePeriod=600 Nov 22 10:48:11 crc kubenswrapper[4938]: I1122 10:48:11.948750 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="9d04d2bb9c07a4ef07461763ab344623cc95c9776ecc97046a4862e181d472ec" exitCode=0 Nov 22 10:48:11 crc kubenswrapper[4938]: I1122 10:48:11.948976 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"9d04d2bb9c07a4ef07461763ab344623cc95c9776ecc97046a4862e181d472ec"} Nov 22 10:48:11 crc kubenswrapper[4938]: I1122 10:48:11.949329 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"d2b3abdbe6f3e506231efa45f5e8c99ca4e0148edc203fafb1a364658f241e4f"} Nov 22 10:48:11 crc kubenswrapper[4938]: I1122 10:48:11.949353 4938 scope.go:117] "RemoveContainer" containerID="630364ea11dce6112bc5c154dd1935ce1f62e93a130b99402687bd95f412446b" Nov 22 10:50:11 crc kubenswrapper[4938]: I1122 10:50:11.301017 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:50:11 crc kubenswrapper[4938]: I1122 10:50:11.301894 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.884163 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-25mlz"] Nov 22 10:50:16 crc kubenswrapper[4938]: E1122 10:50:16.884657 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cf5d758-b959-49f9-8e98-6f84ef428081" containerName="registry" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.884669 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cf5d758-b959-49f9-8e98-6f84ef428081" containerName="registry" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.884764 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cf5d758-b959-49f9-8e98-6f84ef428081" containerName="registry" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.885179 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-25mlz" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.887385 4938 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-467pj" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.887492 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.888108 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.893662 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-vzl7q"] Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.894727 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-vzl7q" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.896413 4938 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-vdshq" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.904173 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-25mlz"] Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.912683 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-vzl7q"] Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.916352 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-cj5bz"] Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.917288 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-cj5bz" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.919827 4938 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-w9pjz" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.934153 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-cj5bz"] Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.945511 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfzrn\" (UniqueName: \"kubernetes.io/projected/58946c83-00e0-4427-8232-d44f5f8f10e0-kube-api-access-pfzrn\") pod \"cert-manager-5b446d88c5-vzl7q\" (UID: \"58946c83-00e0-4427-8232-d44f5f8f10e0\") " pod="cert-manager/cert-manager-5b446d88c5-vzl7q" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.945553 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkc4c\" (UniqueName: \"kubernetes.io/projected/3faf7780-9f49-4f23-ac17-454fbeed3e79-kube-api-access-fkc4c\") pod \"cert-manager-cainjector-7f985d654d-25mlz\" (UID: \"3faf7780-9f49-4f23-ac17-454fbeed3e79\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-25mlz" Nov 22 10:50:16 crc kubenswrapper[4938]: I1122 10:50:16.945574 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dvjd\" (UniqueName: \"kubernetes.io/projected/17da633d-3102-4583-ba74-5c67cfb859c6-kube-api-access-7dvjd\") pod \"cert-manager-webhook-5655c58dd6-cj5bz\" (UID: \"17da633d-3102-4583-ba74-5c67cfb859c6\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-cj5bz" Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.046584 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfzrn\" (UniqueName: \"kubernetes.io/projected/58946c83-00e0-4427-8232-d44f5f8f10e0-kube-api-access-pfzrn\") pod \"cert-manager-5b446d88c5-vzl7q\" (UID: \"58946c83-00e0-4427-8232-d44f5f8f10e0\") " pod="cert-manager/cert-manager-5b446d88c5-vzl7q" Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.046643 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkc4c\" (UniqueName: \"kubernetes.io/projected/3faf7780-9f49-4f23-ac17-454fbeed3e79-kube-api-access-fkc4c\") pod \"cert-manager-cainjector-7f985d654d-25mlz\" (UID: \"3faf7780-9f49-4f23-ac17-454fbeed3e79\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-25mlz" Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.046669 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dvjd\" (UniqueName: \"kubernetes.io/projected/17da633d-3102-4583-ba74-5c67cfb859c6-kube-api-access-7dvjd\") pod \"cert-manager-webhook-5655c58dd6-cj5bz\" (UID: \"17da633d-3102-4583-ba74-5c67cfb859c6\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-cj5bz" Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.067937 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dvjd\" (UniqueName: \"kubernetes.io/projected/17da633d-3102-4583-ba74-5c67cfb859c6-kube-api-access-7dvjd\") pod \"cert-manager-webhook-5655c58dd6-cj5bz\" (UID: \"17da633d-3102-4583-ba74-5c67cfb859c6\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-cj5bz" Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.067988 4938 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-fkc4c\" (UniqueName: \"kubernetes.io/projected/3faf7780-9f49-4f23-ac17-454fbeed3e79-kube-api-access-fkc4c\") pod \"cert-manager-cainjector-7f985d654d-25mlz\" (UID: \"3faf7780-9f49-4f23-ac17-454fbeed3e79\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-25mlz" Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.068034 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfzrn\" (UniqueName: \"kubernetes.io/projected/58946c83-00e0-4427-8232-d44f5f8f10e0-kube-api-access-pfzrn\") pod \"cert-manager-5b446d88c5-vzl7q\" (UID: \"58946c83-00e0-4427-8232-d44f5f8f10e0\") " pod="cert-manager/cert-manager-5b446d88c5-vzl7q" Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.210376 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-25mlz" Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.218998 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-vzl7q" Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.234489 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-cj5bz" Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.519469 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-cj5bz"] Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.526714 4938 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.644332 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-cj5bz" event={"ID":"17da633d-3102-4583-ba74-5c67cfb859c6","Type":"ContainerStarted","Data":"3a0d342cd514d88ad831e798378549b62e81c7b5cb0bee5162ede504f11421fb"} Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.666447 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-25mlz"] Nov 22 10:50:17 crc kubenswrapper[4938]: I1122 10:50:17.672965 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-vzl7q"] Nov 22 10:50:17 crc kubenswrapper[4938]: W1122 10:50:17.678020 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod58946c83_00e0_4427_8232_d44f5f8f10e0.slice/crio-933c31518e72889cb5a31f6edb083eaa3eb60ee11b1567de6963d86b23b07fd7 WatchSource:0}: Error finding container 933c31518e72889cb5a31f6edb083eaa3eb60ee11b1567de6963d86b23b07fd7: Status 404 returned error can't find the container with id 933c31518e72889cb5a31f6edb083eaa3eb60ee11b1567de6963d86b23b07fd7 Nov 22 10:50:17 crc kubenswrapper[4938]: W1122 10:50:17.679243 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3faf7780_9f49_4f23_ac17_454fbeed3e79.slice/crio-15c9df13a7ab05b3fb9fc7b0fb99428b8465ef695fe5003b906da325d26e94a1 WatchSource:0}: Error finding container 15c9df13a7ab05b3fb9fc7b0fb99428b8465ef695fe5003b906da325d26e94a1: Status 404 returned error can't find the container with id 15c9df13a7ab05b3fb9fc7b0fb99428b8465ef695fe5003b906da325d26e94a1 Nov 22 10:50:18 crc kubenswrapper[4938]: I1122 10:50:18.652771 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-vzl7q" 
event={"ID":"58946c83-00e0-4427-8232-d44f5f8f10e0","Type":"ContainerStarted","Data":"933c31518e72889cb5a31f6edb083eaa3eb60ee11b1567de6963d86b23b07fd7"} Nov 22 10:50:18 crc kubenswrapper[4938]: I1122 10:50:18.654099 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-25mlz" event={"ID":"3faf7780-9f49-4f23-ac17-454fbeed3e79","Type":"ContainerStarted","Data":"15c9df13a7ab05b3fb9fc7b0fb99428b8465ef695fe5003b906da325d26e94a1"} Nov 22 10:50:21 crc kubenswrapper[4938]: I1122 10:50:21.671787 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-vzl7q" event={"ID":"58946c83-00e0-4427-8232-d44f5f8f10e0","Type":"ContainerStarted","Data":"f5403d96b6761f768b8f46b744c64bcf520d3efd8b96267123218fdc42b2de48"} Nov 22 10:50:21 crc kubenswrapper[4938]: I1122 10:50:21.673266 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-25mlz" event={"ID":"3faf7780-9f49-4f23-ac17-454fbeed3e79","Type":"ContainerStarted","Data":"3357e66fa1da33c4f9a60ba7626d52f64ed7cb276d67e81a3162fdad9597a69f"} Nov 22 10:50:21 crc kubenswrapper[4938]: I1122 10:50:21.674185 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-cj5bz" event={"ID":"17da633d-3102-4583-ba74-5c67cfb859c6","Type":"ContainerStarted","Data":"e4c3eee9ad6db16aba7f08423347b9220fefc2d65f4c130388cf91bf35d484f6"} Nov 22 10:50:21 crc kubenswrapper[4938]: I1122 10:50:21.674305 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-cj5bz" Nov 22 10:50:21 crc kubenswrapper[4938]: I1122 10:50:21.685221 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-vzl7q" podStartSLOduration=2.564508008 podStartE2EDuration="5.685206996s" podCreationTimestamp="2025-11-22 10:50:16 +0000 UTC" firstStartedPulling="2025-11-22 10:50:17.680609205 +0000 UTC m=+750.148446604" lastFinishedPulling="2025-11-22 10:50:20.801308193 +0000 UTC m=+753.269145592" observedRunningTime="2025-11-22 10:50:21.684675573 +0000 UTC m=+754.152512982" watchObservedRunningTime="2025-11-22 10:50:21.685206996 +0000 UTC m=+754.153044395" Nov 22 10:50:21 crc kubenswrapper[4938]: I1122 10:50:21.702248 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-25mlz" podStartSLOduration=2.587400811 podStartE2EDuration="5.702231182s" podCreationTimestamp="2025-11-22 10:50:16 +0000 UTC" firstStartedPulling="2025-11-22 10:50:17.685295772 +0000 UTC m=+750.153133171" lastFinishedPulling="2025-11-22 10:50:20.800126143 +0000 UTC m=+753.267963542" observedRunningTime="2025-11-22 10:50:21.699673778 +0000 UTC m=+754.167511177" watchObservedRunningTime="2025-11-22 10:50:21.702231182 +0000 UTC m=+754.170068571" Nov 22 10:50:21 crc kubenswrapper[4938]: I1122 10:50:21.722154 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-cj5bz" podStartSLOduration=2.350370149 podStartE2EDuration="5.722135761s" podCreationTimestamp="2025-11-22 10:50:16 +0000 UTC" firstStartedPulling="2025-11-22 10:50:17.526445656 +0000 UTC m=+749.994283055" lastFinishedPulling="2025-11-22 10:50:20.898211238 +0000 UTC m=+753.366048667" observedRunningTime="2025-11-22 10:50:21.718642423 +0000 UTC m=+754.186479822" watchObservedRunningTime="2025-11-22 10:50:21.722135761 +0000 UTC m=+754.189973160" Nov 22 
10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.237997 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-cj5bz" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.445369 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-8sphc"] Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.445689 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovn-controller" containerID="cri-o://2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7" gracePeriod=30 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.446012 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="kube-rbac-proxy-node" containerID="cri-o://bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a" gracePeriod=30 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.446090 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovn-acl-logging" containerID="cri-o://114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240" gracePeriod=30 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.446163 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="northd" containerID="cri-o://b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681" gracePeriod=30 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.446188 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="sbdb" containerID="cri-o://0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d" gracePeriod=30 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.446248 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="nbdb" containerID="cri-o://9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928" gracePeriod=30 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.446066 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c" gracePeriod=30 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.485546 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" containerID="cri-o://05e325b4fb3b81099ea954a2f435e8577e7a39a759a6db1aef36d5749167aaee" gracePeriod=30 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.706892 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j67hq_671da3f6-347d-4f86-890d-155ef844b1f6/kube-multus/2.log" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 
10:50:27.707710 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j67hq_671da3f6-347d-4f86-890d-155ef844b1f6/kube-multus/1.log" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.707755 4938 generic.go:334] "Generic (PLEG): container finished" podID="671da3f6-347d-4f86-890d-155ef844b1f6" containerID="11d70986b71a5b1cbaaba2bd80285a38020e3fdfd8834cce96c5292beb37815e" exitCode=2 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.707811 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j67hq" event={"ID":"671da3f6-347d-4f86-890d-155ef844b1f6","Type":"ContainerDied","Data":"11d70986b71a5b1cbaaba2bd80285a38020e3fdfd8834cce96c5292beb37815e"} Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.707843 4938 scope.go:117] "RemoveContainer" containerID="60e61aec3bd8f859438cbd93d948bd90096a086a1cf8f58bdbb35399f634495a" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.708186 4938 scope.go:117] "RemoveContainer" containerID="11d70986b71a5b1cbaaba2bd80285a38020e3fdfd8834cce96c5292beb37815e" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.712019 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovnkube-controller/3.log" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.714201 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovn-acl-logging/0.log" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.720621 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovn-controller/0.log" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721330 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerID="05e325b4fb3b81099ea954a2f435e8577e7a39a759a6db1aef36d5749167aaee" exitCode=0 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721362 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerID="0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d" exitCode=0 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721370 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerID="9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928" exitCode=0 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721379 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerID="b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681" exitCode=0 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721386 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerID="9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c" exitCode=0 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721394 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerID="bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a" exitCode=0 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721401 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" 
containerID="114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240" exitCode=143 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721408 4938 generic.go:334] "Generic (PLEG): container finished" podID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerID="2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7" exitCode=143 Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721402 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"05e325b4fb3b81099ea954a2f435e8577e7a39a759a6db1aef36d5749167aaee"} Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721446 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d"} Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721461 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928"} Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721473 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681"} Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721486 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c"} Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721499 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a"} Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721514 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240"} Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721526 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7"} Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721538 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" event={"ID":"8b4b8200-248f-47ae-bed3-cbfd4598b99d","Type":"ContainerDied","Data":"5f17757e13e746615e0aa52d7c15989432207432d0a56f26b64f9298f7cdb8b3"} Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.721551 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f17757e13e746615e0aa52d7c15989432207432d0a56f26b64f9298f7cdb8b3" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.764509 4938 scope.go:117] "RemoveContainer" 
containerID="f16813abbb9f6017d793038d770277dc3735d9e7503488bb7a9fc23c4de7f68c" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.789270 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovn-acl-logging/0.log" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.789714 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovn-controller/0.log" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.790215 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839338 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-dgv62"] Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839580 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839595 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839605 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="kubecfg-setup" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839613 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="kubecfg-setup" Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839624 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="sbdb" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839657 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="sbdb" Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839667 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovn-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839675 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovn-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839690 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovn-acl-logging" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839697 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovn-acl-logging" Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839708 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="nbdb" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839715 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="nbdb" Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839728 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839735 4938 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839745 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839752 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839762 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839770 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839784 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839792 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839802 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="northd" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839809 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="northd" Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839821 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839829 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 10:50:27 crc kubenswrapper[4938]: E1122 10:50:27.839840 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="kube-rbac-proxy-node" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839847 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="kube-rbac-proxy-node" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839980 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="northd" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.839994 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovn-acl-logging" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.840007 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.840016 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovn-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.840025 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="nbdb" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 
10:50:27.840036 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="kube-rbac-proxy-node" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.840045 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.840053 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.840063 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.840073 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.840082 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="sbdb" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.840328 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" containerName="ovnkube-controller" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.849075 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.896042 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-etc-openvswitch\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.896460 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-env-overrides\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.897114 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-systemd-units\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.897235 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-var-lib-cni-networks-ovn-kubernetes\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.897334 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-kubelet\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.898058 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-var-lib-openvswitch\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.898172 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-cni-netd\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.898265 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-ovn\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.898383 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-run-netns\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.898498 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovnkube-script-lib\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.898600 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-slash\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.898703 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovn-node-metrics-cert\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.898793 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-systemd\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.898887 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-node-log\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.899094 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-openvswitch\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.899202 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-run-ovn-kubernetes\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.899533 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-log-socket\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.899672 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovnkube-config\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.900665 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-cni-bin\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.900765 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kz2bx\" (UniqueName: \"kubernetes.io/projected/8b4b8200-248f-47ae-bed3-cbfd4598b99d-kube-api-access-kz2bx\") pod \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\" (UID: \"8b4b8200-248f-47ae-bed3-cbfd4598b99d\") " Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.896381 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.897046 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.897499 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.897498 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.897536 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.900392 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-slash" (OuterVolumeSpecName: "host-slash") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.900438 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.900452 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.900464 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.900496 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.902313 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.902317 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.902338 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.902348 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.902369 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-log-socket" (OuterVolumeSpecName: "log-socket") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.902387 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-node-log" (OuterVolumeSpecName: "node-log") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.902686 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.909469 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b4b8200-248f-47ae-bed3-cbfd4598b99d-kube-api-access-kz2bx" (OuterVolumeSpecName: "kube-api-access-kz2bx") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "kube-api-access-kz2bx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.910098 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:50:27 crc kubenswrapper[4938]: I1122 10:50:27.923395 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "8b4b8200-248f-47ae-bed3-cbfd4598b99d" (UID: "8b4b8200-248f-47ae-bed3-cbfd4598b99d"). 
InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.002638 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-run-openvswitch\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.002941 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/dcad1764-6727-4dbb-b349-063de0e2af6e-ovn-node-metrics-cert\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.003058 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-run-ovn\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.003155 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-etc-openvswitch\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.003222 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/dcad1764-6727-4dbb-b349-063de0e2af6e-env-overrides\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.003306 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.003394 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/dcad1764-6727-4dbb-b349-063de0e2af6e-ovnkube-script-lib\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.003458 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-node-log\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.003531 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-log-socket\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.003619 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-var-lib-openvswitch\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.003727 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-slash\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.003802 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-systemd-units\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.003874 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-run-ovn-kubernetes\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.003981 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-kubelet\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004071 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-cni-bin\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004162 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-run-netns\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004249 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gx75\" (UniqueName: \"kubernetes.io/projected/dcad1764-6727-4dbb-b349-063de0e2af6e-kube-api-access-2gx75\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004332 4938 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/dcad1764-6727-4dbb-b349-063de0e2af6e-ovnkube-config\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004407 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-run-systemd\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004496 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-cni-netd\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004609 4938 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004676 4938 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004737 4938 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004791 4938 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-slash\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004845 4938 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004900 4938 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.004982 4938 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-node-log\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.005036 4938 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.005089 4938 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc 
kubenswrapper[4938]: I1122 10:50:28.005154 4938 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-log-socket\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.005211 4938 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.005263 4938 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.005315 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kz2bx\" (UniqueName: \"kubernetes.io/projected/8b4b8200-248f-47ae-bed3-cbfd4598b99d-kube-api-access-kz2bx\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.005363 4938 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.005411 4938 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8b4b8200-248f-47ae-bed3-cbfd4598b99d-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.005457 4938 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.005512 4938 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.005566 4938 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.005620 4938 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.005671 4938 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8b4b8200-248f-47ae-bed3-cbfd4598b99d-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106643 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-run-openvswitch\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106688 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/dcad1764-6727-4dbb-b349-063de0e2af6e-ovn-node-metrics-cert\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106713 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-run-ovn\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106741 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-etc-openvswitch\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106758 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/dcad1764-6727-4dbb-b349-063de0e2af6e-env-overrides\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106776 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106799 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/dcad1764-6727-4dbb-b349-063de0e2af6e-ovnkube-script-lib\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106820 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-node-log\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106836 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-log-socket\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106855 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-var-lib-openvswitch\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106876 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-slash\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106904 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-systemd-units\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106944 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-run-ovn-kubernetes\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106968 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-kubelet\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.106990 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-cni-bin\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107009 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-run-netns\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107422 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-run-openvswitch\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107470 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-var-lib-openvswitch\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107482 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gx75\" (UniqueName: \"kubernetes.io/projected/dcad1764-6727-4dbb-b349-063de0e2af6e-kube-api-access-2gx75\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107525 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-systemd-units\") pod 
\"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107502 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-slash\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107585 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/dcad1764-6727-4dbb-b349-063de0e2af6e-ovnkube-config\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107667 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-run-ovn-kubernetes\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107690 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-cni-bin\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107718 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-kubelet\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107728 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-etc-openvswitch\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107752 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107775 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-run-netns\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107790 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-run-systemd\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107799 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-run-ovn\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107810 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-cni-netd\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107771 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-node-log\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107821 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-run-systemd\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.107869 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-host-cni-netd\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.108107 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/dcad1764-6727-4dbb-b349-063de0e2af6e-log-socket\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.108304 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/dcad1764-6727-4dbb-b349-063de0e2af6e-env-overrides\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.108389 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/dcad1764-6727-4dbb-b349-063de0e2af6e-ovnkube-config\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.108466 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/dcad1764-6727-4dbb-b349-063de0e2af6e-ovnkube-script-lib\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.111204 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/dcad1764-6727-4dbb-b349-063de0e2af6e-ovn-node-metrics-cert\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.121839 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gx75\" (UniqueName: \"kubernetes.io/projected/dcad1764-6727-4dbb-b349-063de0e2af6e-kube-api-access-2gx75\") pod \"ovnkube-node-dgv62\" (UID: \"dcad1764-6727-4dbb-b349-063de0e2af6e\") " pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.169347 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.340966 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-88scl"] Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.341413 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" podUID="d90bb3f2-72ce-41fa-b865-8892a4b70c06" containerName="controller-manager" containerID="cri-o://d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5" gracePeriod=30 Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.437470 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j"] Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.437701 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" podUID="edf2b3e3-6340-4f0f-8688-08f4b7a918b1" containerName="route-controller-manager" containerID="cri-o://b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a" gracePeriod=30 Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.535582 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.625167 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.713379 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-proxy-ca-bundles\") pod \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.713422 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-client-ca\") pod \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.713444 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8t6p\" (UniqueName: \"kubernetes.io/projected/d90bb3f2-72ce-41fa-b865-8892a4b70c06-kube-api-access-h8t6p\") pod \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.713527 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d90bb3f2-72ce-41fa-b865-8892a4b70c06-serving-cert\") pod \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.713588 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-config\") pod \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\" (UID: \"d90bb3f2-72ce-41fa-b865-8892a4b70c06\") " Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.713677 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqf98\" (UniqueName: \"kubernetes.io/projected/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-kube-api-access-gqf98\") pod \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.713692 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-client-ca\") pod \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.714273 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-client-ca" (OuterVolumeSpecName: "client-ca") pod "d90bb3f2-72ce-41fa-b865-8892a4b70c06" (UID: "d90bb3f2-72ce-41fa-b865-8892a4b70c06"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.714378 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-client-ca" (OuterVolumeSpecName: "client-ca") pod "edf2b3e3-6340-4f0f-8688-08f4b7a918b1" (UID: "edf2b3e3-6340-4f0f-8688-08f4b7a918b1"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.714852 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-config" (OuterVolumeSpecName: "config") pod "d90bb3f2-72ce-41fa-b865-8892a4b70c06" (UID: "d90bb3f2-72ce-41fa-b865-8892a4b70c06"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.715710 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "d90bb3f2-72ce-41fa-b865-8892a4b70c06" (UID: "d90bb3f2-72ce-41fa-b865-8892a4b70c06"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.718477 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d90bb3f2-72ce-41fa-b865-8892a4b70c06-kube-api-access-h8t6p" (OuterVolumeSpecName: "kube-api-access-h8t6p") pod "d90bb3f2-72ce-41fa-b865-8892a4b70c06" (UID: "d90bb3f2-72ce-41fa-b865-8892a4b70c06"). InnerVolumeSpecName "kube-api-access-h8t6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.718499 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-kube-api-access-gqf98" (OuterVolumeSpecName: "kube-api-access-gqf98") pod "edf2b3e3-6340-4f0f-8688-08f4b7a918b1" (UID: "edf2b3e3-6340-4f0f-8688-08f4b7a918b1"). InnerVolumeSpecName "kube-api-access-gqf98". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.718551 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d90bb3f2-72ce-41fa-b865-8892a4b70c06-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d90bb3f2-72ce-41fa-b865-8892a4b70c06" (UID: "d90bb3f2-72ce-41fa-b865-8892a4b70c06"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.728324 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j67hq_671da3f6-347d-4f86-890d-155ef844b1f6/kube-multus/2.log" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.728421 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j67hq" event={"ID":"671da3f6-347d-4f86-890d-155ef844b1f6","Type":"ContainerStarted","Data":"f5cc0e68b3f931b058077f6eac2a771af54d2341a97ceafc6c0eae3a15a14a21"} Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.733128 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovn-acl-logging/0.log" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.733733 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-8sphc_8b4b8200-248f-47ae-bed3-cbfd4598b99d/ovn-controller/0.log" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.734219 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-8sphc" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.735638 4938 generic.go:334] "Generic (PLEG): container finished" podID="dcad1764-6727-4dbb-b349-063de0e2af6e" containerID="1ee8b5302abda531639b3be4d020a9fdfc25e552d3472b7e0813e2db42a92b7e" exitCode=0 Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.735716 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" event={"ID":"dcad1764-6727-4dbb-b349-063de0e2af6e","Type":"ContainerDied","Data":"1ee8b5302abda531639b3be4d020a9fdfc25e552d3472b7e0813e2db42a92b7e"} Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.735754 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" event={"ID":"dcad1764-6727-4dbb-b349-063de0e2af6e","Type":"ContainerStarted","Data":"d59c2d9b16edfd0056f87af649a63b2f787df850206044a9e58fd1fab34a9890"} Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.736998 4938 generic.go:334] "Generic (PLEG): container finished" podID="edf2b3e3-6340-4f0f-8688-08f4b7a918b1" containerID="b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a" exitCode=0 Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.737097 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.737103 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" event={"ID":"edf2b3e3-6340-4f0f-8688-08f4b7a918b1","Type":"ContainerDied","Data":"b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a"} Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.737130 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j" event={"ID":"edf2b3e3-6340-4f0f-8688-08f4b7a918b1","Type":"ContainerDied","Data":"153872a10e12af892b6bf660016b1fe44dcd73ff38d9b2d05f512e667dedba66"} Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.737153 4938 scope.go:117] "RemoveContainer" containerID="b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.741433 4938 generic.go:334] "Generic (PLEG): container finished" podID="d90bb3f2-72ce-41fa-b865-8892a4b70c06" containerID="d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5" exitCode=0 Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.741469 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" event={"ID":"d90bb3f2-72ce-41fa-b865-8892a4b70c06","Type":"ContainerDied","Data":"d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5"} Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.741523 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" event={"ID":"d90bb3f2-72ce-41fa-b865-8892a4b70c06","Type":"ContainerDied","Data":"d7bc6f7adcf937d4a6888b17a1c1512123bef9b5af6f6e162632b15bb1d88eac"} Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.741536 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-88scl" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.760860 4938 scope.go:117] "RemoveContainer" containerID="b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a" Nov 22 10:50:28 crc kubenswrapper[4938]: E1122 10:50:28.761219 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a\": container with ID starting with b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a not found: ID does not exist" containerID="b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.761261 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a"} err="failed to get container status \"b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a\": rpc error: code = NotFound desc = could not find container \"b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a\": container with ID starting with b591a044ca151fb1993bd8024525a15b241bc9ff3b6fd27e8921998fbd11a35a not found: ID does not exist" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.761290 4938 scope.go:117] "RemoveContainer" containerID="d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.781834 4938 scope.go:117] "RemoveContainer" containerID="d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5" Nov 22 10:50:28 crc kubenswrapper[4938]: E1122 10:50:28.782382 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5\": container with ID starting with d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5 not found: ID does not exist" containerID="d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.782428 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5"} err="failed to get container status \"d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5\": rpc error: code = NotFound desc = could not find container \"d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5\": container with ID starting with d0e54a4f0ce6e2656dcdd5fbcf91250b7810e57fa7e7e4af5b386f9cdbf8b4f5 not found: ID does not exist" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.815444 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-8sphc"] Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.817042 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-config\") pod \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\" (UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.817123 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-serving-cert\") pod \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\" 
(UID: \"edf2b3e3-6340-4f0f-8688-08f4b7a918b1\") " Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.817982 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d90bb3f2-72ce-41fa-b865-8892a4b70c06-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.818009 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.818021 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqf98\" (UniqueName: \"kubernetes.io/projected/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-kube-api-access-gqf98\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.818033 4938 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.818044 4938 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.818056 4938 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d90bb3f2-72ce-41fa-b865-8892a4b70c06-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.818066 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8t6p\" (UniqueName: \"kubernetes.io/projected/d90bb3f2-72ce-41fa-b865-8892a4b70c06-kube-api-access-h8t6p\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.818276 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-config" (OuterVolumeSpecName: "config") pod "edf2b3e3-6340-4f0f-8688-08f4b7a918b1" (UID: "edf2b3e3-6340-4f0f-8688-08f4b7a918b1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.819759 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-8sphc"] Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.821871 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "edf2b3e3-6340-4f0f-8688-08f4b7a918b1" (UID: "edf2b3e3-6340-4f0f-8688-08f4b7a918b1"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.831186 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-88scl"] Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.835066 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-88scl"] Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.919053 4938 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:28 crc kubenswrapper[4938]: I1122 10:50:28.919089 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edf2b3e3-6340-4f0f-8688-08f4b7a918b1-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.059344 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j"] Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.061803 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-gw96j"] Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.606703 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-689587dbf9-mmh2t"] Nov 22 10:50:29 crc kubenswrapper[4938]: E1122 10:50:29.607289 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d90bb3f2-72ce-41fa-b865-8892a4b70c06" containerName="controller-manager" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.607307 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="d90bb3f2-72ce-41fa-b865-8892a4b70c06" containerName="controller-manager" Nov 22 10:50:29 crc kubenswrapper[4938]: E1122 10:50:29.607325 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edf2b3e3-6340-4f0f-8688-08f4b7a918b1" containerName="route-controller-manager" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.607333 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="edf2b3e3-6340-4f0f-8688-08f4b7a918b1" containerName="route-controller-manager" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.607460 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="d90bb3f2-72ce-41fa-b865-8892a4b70c06" containerName="controller-manager" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.607475 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="edf2b3e3-6340-4f0f-8688-08f4b7a918b1" containerName="route-controller-manager" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.607960 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.610510 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.610597 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.610741 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.613015 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.613092 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.613416 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.622515 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.632852 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9"] Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.634445 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.637957 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.638373 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.638468 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.638384 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.638428 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.638677 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.729948 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-serving-cert\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.730292 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-proxy-ca-bundles\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.730324 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wg4z4\" (UniqueName: \"kubernetes.io/projected/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-kube-api-access-wg4z4\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.730371 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-config\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.730393 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-client-ca\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.751622 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" event={"ID":"dcad1764-6727-4dbb-b349-063de0e2af6e","Type":"ContainerStarted","Data":"52306bbde53a54edc0e09dd3ee8ac458c21d5881d4d28f590109f4217b2a41f4"} Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.751703 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" event={"ID":"dcad1764-6727-4dbb-b349-063de0e2af6e","Type":"ContainerStarted","Data":"b64e0e58b17b792bab156c209a830103b9f7f9f77d6936ab6f6c546e3c8aeecd"} Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.751721 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" event={"ID":"dcad1764-6727-4dbb-b349-063de0e2af6e","Type":"ContainerStarted","Data":"67a794f9ecf9811412184f8f31b6c4efac3e627b0b8c085341316ef4097649de"} Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.751735 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" event={"ID":"dcad1764-6727-4dbb-b349-063de0e2af6e","Type":"ContainerStarted","Data":"e1fb7e503ca20880bdcb953ee9808318658c77068f755d97a4d463bdab1de02a"} Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.751748 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" event={"ID":"dcad1764-6727-4dbb-b349-063de0e2af6e","Type":"ContainerStarted","Data":"98e72b0fd0b927dd46f4963c0d24d7aebac131e4c4d0e1d3a766037d64439ae3"} Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.831575 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-client-ca\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " 
pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.831616 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-config\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.831652 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a42e41ae-a133-4776-a110-d70a9b5bd9dd-serving-cert\") pod \"route-controller-manager-84cf7f7df8-ck5r9\" (UID: \"a42e41ae-a133-4776-a110-d70a9b5bd9dd\") " pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.831669 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-serving-cert\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.831702 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42e41ae-a133-4776-a110-d70a9b5bd9dd-config\") pod \"route-controller-manager-84cf7f7df8-ck5r9\" (UID: \"a42e41ae-a133-4776-a110-d70a9b5bd9dd\") " pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.831724 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cz6ck\" (UniqueName: \"kubernetes.io/projected/a42e41ae-a133-4776-a110-d70a9b5bd9dd-kube-api-access-cz6ck\") pod \"route-controller-manager-84cf7f7df8-ck5r9\" (UID: \"a42e41ae-a133-4776-a110-d70a9b5bd9dd\") " pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.831747 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a42e41ae-a133-4776-a110-d70a9b5bd9dd-client-ca\") pod \"route-controller-manager-84cf7f7df8-ck5r9\" (UID: \"a42e41ae-a133-4776-a110-d70a9b5bd9dd\") " pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.831782 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-proxy-ca-bundles\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.831800 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wg4z4\" (UniqueName: \"kubernetes.io/projected/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-kube-api-access-wg4z4\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " 
pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.832465 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-client-ca\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.832990 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-config\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.833535 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-proxy-ca-bundles\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.838317 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-serving-cert\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.848868 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wg4z4\" (UniqueName: \"kubernetes.io/projected/0f5b9f51-9d95-4137-b285-4b5f7b8b4c32-kube-api-access-wg4z4\") pod \"controller-manager-689587dbf9-mmh2t\" (UID: \"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32\") " pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.921182 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.932824 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a42e41ae-a133-4776-a110-d70a9b5bd9dd-serving-cert\") pod \"route-controller-manager-84cf7f7df8-ck5r9\" (UID: \"a42e41ae-a133-4776-a110-d70a9b5bd9dd\") " pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.932955 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42e41ae-a133-4776-a110-d70a9b5bd9dd-config\") pod \"route-controller-manager-84cf7f7df8-ck5r9\" (UID: \"a42e41ae-a133-4776-a110-d70a9b5bd9dd\") " pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.933086 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cz6ck\" (UniqueName: \"kubernetes.io/projected/a42e41ae-a133-4776-a110-d70a9b5bd9dd-kube-api-access-cz6ck\") pod \"route-controller-manager-84cf7f7df8-ck5r9\" (UID: \"a42e41ae-a133-4776-a110-d70a9b5bd9dd\") " pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.933196 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a42e41ae-a133-4776-a110-d70a9b5bd9dd-client-ca\") pod \"route-controller-manager-84cf7f7df8-ck5r9\" (UID: \"a42e41ae-a133-4776-a110-d70a9b5bd9dd\") " pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.934084 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a42e41ae-a133-4776-a110-d70a9b5bd9dd-client-ca\") pod \"route-controller-manager-84cf7f7df8-ck5r9\" (UID: \"a42e41ae-a133-4776-a110-d70a9b5bd9dd\") " pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.934625 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42e41ae-a133-4776-a110-d70a9b5bd9dd-config\") pod \"route-controller-manager-84cf7f7df8-ck5r9\" (UID: \"a42e41ae-a133-4776-a110-d70a9b5bd9dd\") " pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.935991 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a42e41ae-a133-4776-a110-d70a9b5bd9dd-serving-cert\") pod \"route-controller-manager-84cf7f7df8-ck5r9\" (UID: \"a42e41ae-a133-4776-a110-d70a9b5bd9dd\") " pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: E1122 10:50:29.940952 4938 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-689587dbf9-mmh2t_openshift-controller-manager_0f5b9f51-9d95-4137-b285-4b5f7b8b4c32_0(ebf2baf62ac1349609e6ca361338dc38a0d69015c4743d01cc2415b4cc4ad7a1): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" Nov 22 10:50:29 crc kubenswrapper[4938]: E1122 10:50:29.941003 4938 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-689587dbf9-mmh2t_openshift-controller-manager_0f5b9f51-9d95-4137-b285-4b5f7b8b4c32_0(ebf2baf62ac1349609e6ca361338dc38a0d69015c4743d01cc2415b4cc4ad7a1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: E1122 10:50:29.941021 4938 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-689587dbf9-mmh2t_openshift-controller-manager_0f5b9f51-9d95-4137-b285-4b5f7b8b4c32_0(ebf2baf62ac1349609e6ca361338dc38a0d69015c4743d01cc2415b4cc4ad7a1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:29 crc kubenswrapper[4938]: E1122 10:50:29.941057 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"controller-manager-689587dbf9-mmh2t_openshift-controller-manager(0f5b9f51-9d95-4137-b285-4b5f7b8b4c32)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"controller-manager-689587dbf9-mmh2t_openshift-controller-manager(0f5b9f51-9d95-4137-b285-4b5f7b8b4c32)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-689587dbf9-mmh2t_openshift-controller-manager_0f5b9f51-9d95-4137-b285-4b5f7b8b4c32_0(ebf2baf62ac1349609e6ca361338dc38a0d69015c4743d01cc2415b4cc4ad7a1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" podUID="0f5b9f51-9d95-4137-b285-4b5f7b8b4c32" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.947853 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cz6ck\" (UniqueName: \"kubernetes.io/projected/a42e41ae-a133-4776-a110-d70a9b5bd9dd-kube-api-access-cz6ck\") pod \"route-controller-manager-84cf7f7df8-ck5r9\" (UID: \"a42e41ae-a133-4776-a110-d70a9b5bd9dd\") " pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: I1122 10:50:29.953548 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: E1122 10:50:29.970565 4938 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_route-controller-manager-84cf7f7df8-ck5r9_openshift-route-controller-manager_a42e41ae-a133-4776-a110-d70a9b5bd9dd_0(4696c5f6f5a62be22cc9ac907ed67b3c3d09147b625a2afe9cea1a21238ad944): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Nov 22 10:50:29 crc kubenswrapper[4938]: E1122 10:50:29.970625 4938 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_route-controller-manager-84cf7f7df8-ck5r9_openshift-route-controller-manager_a42e41ae-a133-4776-a110-d70a9b5bd9dd_0(4696c5f6f5a62be22cc9ac907ed67b3c3d09147b625a2afe9cea1a21238ad944): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: E1122 10:50:29.970645 4938 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_route-controller-manager-84cf7f7df8-ck5r9_openshift-route-controller-manager_a42e41ae-a133-4776-a110-d70a9b5bd9dd_0(4696c5f6f5a62be22cc9ac907ed67b3c3d09147b625a2afe9cea1a21238ad944): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:29 crc kubenswrapper[4938]: E1122 10:50:29.970689 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"route-controller-manager-84cf7f7df8-ck5r9_openshift-route-controller-manager(a42e41ae-a133-4776-a110-d70a9b5bd9dd)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"route-controller-manager-84cf7f7df8-ck5r9_openshift-route-controller-manager(a42e41ae-a133-4776-a110-d70a9b5bd9dd)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_route-controller-manager-84cf7f7df8-ck5r9_openshift-route-controller-manager_a42e41ae-a133-4776-a110-d70a9b5bd9dd_0(4696c5f6f5a62be22cc9ac907ed67b3c3d09147b625a2afe9cea1a21238ad944): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" podUID="a42e41ae-a133-4776-a110-d70a9b5bd9dd" Nov 22 10:50:30 crc kubenswrapper[4938]: I1122 10:50:30.456013 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b4b8200-248f-47ae-bed3-cbfd4598b99d" path="/var/lib/kubelet/pods/8b4b8200-248f-47ae-bed3-cbfd4598b99d/volumes" Nov 22 10:50:30 crc kubenswrapper[4938]: I1122 10:50:30.457677 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d90bb3f2-72ce-41fa-b865-8892a4b70c06" path="/var/lib/kubelet/pods/d90bb3f2-72ce-41fa-b865-8892a4b70c06/volumes" Nov 22 10:50:30 crc kubenswrapper[4938]: I1122 10:50:30.458385 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edf2b3e3-6340-4f0f-8688-08f4b7a918b1" path="/var/lib/kubelet/pods/edf2b3e3-6340-4f0f-8688-08f4b7a918b1/volumes" Nov 22 10:50:30 crc kubenswrapper[4938]: I1122 10:50:30.759967 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" event={"ID":"dcad1764-6727-4dbb-b349-063de0e2af6e","Type":"ContainerStarted","Data":"fd8e16cc4e78ef5441a3b8606b00ae50c2be29235120e4f36db63c3646754a1e"} Nov 22 10:50:32 crc kubenswrapper[4938]: I1122 10:50:32.776975 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" event={"ID":"dcad1764-6727-4dbb-b349-063de0e2af6e","Type":"ContainerStarted","Data":"c2455423e5c49cb5343ca97148643c6c6d44411629acd1dff8716f931cc5d169"} Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.736807 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9"] Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.737505 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.737952 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.747810 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-689587dbf9-mmh2t"] Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.747984 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.748535 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:35 crc kubenswrapper[4938]: E1122 10:50:35.762159 4938 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_route-controller-manager-84cf7f7df8-ck5r9_openshift-route-controller-manager_a42e41ae-a133-4776-a110-d70a9b5bd9dd_0(d15bd87781a2d9420d1c1c2260243bb16dee996e64b851fe87b77ca4c7aad969): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Nov 22 10:50:35 crc kubenswrapper[4938]: E1122 10:50:35.762216 4938 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_route-controller-manager-84cf7f7df8-ck5r9_openshift-route-controller-manager_a42e41ae-a133-4776-a110-d70a9b5bd9dd_0(d15bd87781a2d9420d1c1c2260243bb16dee996e64b851fe87b77ca4c7aad969): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:35 crc kubenswrapper[4938]: E1122 10:50:35.762239 4938 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_route-controller-manager-84cf7f7df8-ck5r9_openshift-route-controller-manager_a42e41ae-a133-4776-a110-d70a9b5bd9dd_0(d15bd87781a2d9420d1c1c2260243bb16dee996e64b851fe87b77ca4c7aad969): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:35 crc kubenswrapper[4938]: E1122 10:50:35.762291 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"route-controller-manager-84cf7f7df8-ck5r9_openshift-route-controller-manager(a42e41ae-a133-4776-a110-d70a9b5bd9dd)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"route-controller-manager-84cf7f7df8-ck5r9_openshift-route-controller-manager(a42e41ae-a133-4776-a110-d70a9b5bd9dd)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_route-controller-manager-84cf7f7df8-ck5r9_openshift-route-controller-manager_a42e41ae-a133-4776-a110-d70a9b5bd9dd_0(d15bd87781a2d9420d1c1c2260243bb16dee996e64b851fe87b77ca4c7aad969): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" podUID="a42e41ae-a133-4776-a110-d70a9b5bd9dd" Nov 22 10:50:35 crc kubenswrapper[4938]: E1122 10:50:35.775087 4938 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-689587dbf9-mmh2t_openshift-controller-manager_0f5b9f51-9d95-4137-b285-4b5f7b8b4c32_0(e1ddca3d692949687d55830ade6443b8d0f155032c352a81bac450450484d594): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 10:50:35 crc kubenswrapper[4938]: E1122 10:50:35.775177 4938 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-689587dbf9-mmh2t_openshift-controller-manager_0f5b9f51-9d95-4137-b285-4b5f7b8b4c32_0(e1ddca3d692949687d55830ade6443b8d0f155032c352a81bac450450484d594): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:35 crc kubenswrapper[4938]: E1122 10:50:35.775211 4938 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-689587dbf9-mmh2t_openshift-controller-manager_0f5b9f51-9d95-4137-b285-4b5f7b8b4c32_0(e1ddca3d692949687d55830ade6443b8d0f155032c352a81bac450450484d594): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:35 crc kubenswrapper[4938]: E1122 10:50:35.775281 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"controller-manager-689587dbf9-mmh2t_openshift-controller-manager(0f5b9f51-9d95-4137-b285-4b5f7b8b4c32)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"controller-manager-689587dbf9-mmh2t_openshift-controller-manager(0f5b9f51-9d95-4137-b285-4b5f7b8b4c32)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_controller-manager-689587dbf9-mmh2t_openshift-controller-manager_0f5b9f51-9d95-4137-b285-4b5f7b8b4c32_0(e1ddca3d692949687d55830ade6443b8d0f155032c352a81bac450450484d594): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" podUID="0f5b9f51-9d95-4137-b285-4b5f7b8b4c32" Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.801078 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" event={"ID":"dcad1764-6727-4dbb-b349-063de0e2af6e","Type":"ContainerStarted","Data":"9361b8fe547756595906cb88ab010c14e2c256e0db3d626630eebd7ca2b8461f"} Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.802037 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.802064 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.802103 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.828866 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.830979 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:50:35 crc kubenswrapper[4938]: I1122 10:50:35.840621 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" podStartSLOduration=8.840598713 podStartE2EDuration="8.840598713s" podCreationTimestamp="2025-11-22 10:50:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:50:35.834623574 +0000 UTC m=+768.302460983" watchObservedRunningTime="2025-11-22 10:50:35.840598713 +0000 UTC m=+768.308436112" Nov 22 10:50:36 crc kubenswrapper[4938]: I1122 10:50:36.369327 4938 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 22 10:50:41 crc kubenswrapper[4938]: I1122 10:50:41.300343 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:50:41 crc kubenswrapper[4938]: I1122 10:50:41.300640 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" 
podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.451416 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.452669 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.631113 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9"] Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.656038 4938 scope.go:117] "RemoveContainer" containerID="bf2bd58b530ba1758106651cbf01c2b14928af6ddd4548e2d4be0ee25242c73a" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.685392 4938 scope.go:117] "RemoveContainer" containerID="9758dbbc9f8b375f229a7d3e8f9db730499742f336c296717179304366d6130c" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.715827 4938 scope.go:117] "RemoveContainer" containerID="05e325b4fb3b81099ea954a2f435e8577e7a39a759a6db1aef36d5749167aaee" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.742590 4938 scope.go:117] "RemoveContainer" containerID="b7dd64b5d59aec184c5782b3da1942f777f00731a34fe98c188d9857901a2681" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.755120 4938 scope.go:117] "RemoveContainer" containerID="0156317b750231938c3e9eb519d80b38d099856264e701c740d9fbe76bdfd26d" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.770347 4938 scope.go:117] "RemoveContainer" containerID="114e37f8a9f6ae94371a173ffe9efdc35c8430009e6536274153141b2585d240" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.788335 4938 scope.go:117] "RemoveContainer" containerID="2c43ee513a74be3c04f044686dd082ced9999109a9bde5ebf7073d21c5924de7" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.810005 4938 scope.go:117] "RemoveContainer" containerID="d26647c8f44b5cdc497eaa30bfc786eddb812e9348def726159be8f9c5d779b3" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.826713 4938 scope.go:117] "RemoveContainer" containerID="9bfaff831fabcb023f071043e339d5e70f578ae904ec1d7f4d308cdacb02c928" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.879756 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" event={"ID":"a42e41ae-a133-4776-a110-d70a9b5bd9dd","Type":"ContainerStarted","Data":"38fb5102bfcbdd6f3df90435139adc685bfd75f45e05e7de4f51995cd10fe5f8"} Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.879791 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" event={"ID":"a42e41ae-a133-4776-a110-d70a9b5bd9dd","Type":"ContainerStarted","Data":"b88637151d3fd120e6410ce421f56f02116883b97c5350626f9f2c474b426f3a"} Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.879958 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:48 crc kubenswrapper[4938]: I1122 10:50:48.896212 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" podStartSLOduration=19.896196038 podStartE2EDuration="19.896196038s" podCreationTimestamp="2025-11-22 10:50:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:50:48.894880505 +0000 UTC m=+781.362717914" watchObservedRunningTime="2025-11-22 10:50:48.896196038 +0000 UTC m=+781.364033437" Nov 22 10:50:49 crc kubenswrapper[4938]: I1122 10:50:49.386777 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-84cf7f7df8-ck5r9" Nov 22 10:50:49 crc kubenswrapper[4938]: I1122 10:50:49.446877 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:49 crc kubenswrapper[4938]: I1122 10:50:49.447389 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:49 crc kubenswrapper[4938]: I1122 10:50:49.644907 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-689587dbf9-mmh2t"] Nov 22 10:50:49 crc kubenswrapper[4938]: W1122 10:50:49.650597 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f5b9f51_9d95_4137_b285_4b5f7b8b4c32.slice/crio-e2edcaf02db936f9ee7a48aa520f774f2e830db9a9bdca9f234ffbecf865239d WatchSource:0}: Error finding container e2edcaf02db936f9ee7a48aa520f774f2e830db9a9bdca9f234ffbecf865239d: Status 404 returned error can't find the container with id e2edcaf02db936f9ee7a48aa520f774f2e830db9a9bdca9f234ffbecf865239d Nov 22 10:50:49 crc kubenswrapper[4938]: I1122 10:50:49.899145 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" event={"ID":"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32","Type":"ContainerStarted","Data":"a76f480985af66ecd9013c864e3d13544211bc70066ee021c9215725da44a3bc"} Nov 22 10:50:49 crc kubenswrapper[4938]: I1122 10:50:49.899184 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" event={"ID":"0f5b9f51-9d95-4137-b285-4b5f7b8b4c32","Type":"ContainerStarted","Data":"e2edcaf02db936f9ee7a48aa520f774f2e830db9a9bdca9f234ffbecf865239d"} Nov 22 10:50:49 crc kubenswrapper[4938]: I1122 10:50:49.899632 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:49 crc kubenswrapper[4938]: I1122 10:50:49.904051 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" Nov 22 10:50:49 crc kubenswrapper[4938]: I1122 10:50:49.950132 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-689587dbf9-mmh2t" podStartSLOduration=20.950108341 podStartE2EDuration="20.950108341s" podCreationTimestamp="2025-11-22 10:50:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:50:49.91856001 +0000 UTC m=+782.386397409" watchObservedRunningTime="2025-11-22 10:50:49.950108341 +0000 UTC m=+782.417945740" Nov 22 10:50:58 crc 
kubenswrapper[4938]: I1122 10:50:58.191722 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dgv62" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.096766 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j"] Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.099113 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.100811 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.107090 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j"] Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.279361 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b44xr\" (UniqueName: \"kubernetes.io/projected/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-kube-api-access-b44xr\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j\" (UID: \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.279438 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j\" (UID: \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.279529 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j\" (UID: \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.380812 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j\" (UID: \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.380912 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b44xr\" (UniqueName: \"kubernetes.io/projected/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-kube-api-access-b44xr\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j\" (UID: \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.380961 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j\" (UID: \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.381384 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j\" (UID: \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.381449 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j\" (UID: \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.411393 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b44xr\" (UniqueName: \"kubernetes.io/projected/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-kube-api-access-b44xr\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j\" (UID: \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.416244 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.848415 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j"] Nov 22 10:51:06 crc kubenswrapper[4938]: W1122 10:51:06.863885 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod645aab0f_dd96_4c5b_bb69_85d98c54bfe1.slice/crio-0e7b4528d38def343cb2e11e8d68ee7882e896ee693cb3eb4cc0bfe4a2e4c40d WatchSource:0}: Error finding container 0e7b4528d38def343cb2e11e8d68ee7882e896ee693cb3eb4cc0bfe4a2e4c40d: Status 404 returned error can't find the container with id 0e7b4528d38def343cb2e11e8d68ee7882e896ee693cb3eb4cc0bfe4a2e4c40d Nov 22 10:51:06 crc kubenswrapper[4938]: I1122 10:51:06.980405 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" event={"ID":"645aab0f-dd96-4c5b-bb69-85d98c54bfe1","Type":"ContainerStarted","Data":"0e7b4528d38def343cb2e11e8d68ee7882e896ee693cb3eb4cc0bfe4a2e4c40d"} Nov 22 10:51:07 crc kubenswrapper[4938]: I1122 10:51:07.988053 4938 generic.go:334] "Generic (PLEG): container finished" podID="645aab0f-dd96-4c5b-bb69-85d98c54bfe1" containerID="2e5fc59b11985764978775ad0184331d1b1a1411fffefafdc87c1a4b1d624870" exitCode=0 Nov 22 10:51:07 crc kubenswrapper[4938]: I1122 10:51:07.988114 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" 
event={"ID":"645aab0f-dd96-4c5b-bb69-85d98c54bfe1","Type":"ContainerDied","Data":"2e5fc59b11985764978775ad0184331d1b1a1411fffefafdc87c1a4b1d624870"} Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.348650 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zhx2n"] Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.350150 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.359474 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zhx2n"] Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.509263 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9811462e-d47f-445f-8124-b041c4c0935e-catalog-content\") pod \"redhat-operators-zhx2n\" (UID: \"9811462e-d47f-445f-8124-b041c4c0935e\") " pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.509426 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9811462e-d47f-445f-8124-b041c4c0935e-utilities\") pod \"redhat-operators-zhx2n\" (UID: \"9811462e-d47f-445f-8124-b041c4c0935e\") " pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.509547 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qrhk\" (UniqueName: \"kubernetes.io/projected/9811462e-d47f-445f-8124-b041c4c0935e-kube-api-access-7qrhk\") pod \"redhat-operators-zhx2n\" (UID: \"9811462e-d47f-445f-8124-b041c4c0935e\") " pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.610911 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9811462e-d47f-445f-8124-b041c4c0935e-catalog-content\") pod \"redhat-operators-zhx2n\" (UID: \"9811462e-d47f-445f-8124-b041c4c0935e\") " pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.610987 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9811462e-d47f-445f-8124-b041c4c0935e-utilities\") pod \"redhat-operators-zhx2n\" (UID: \"9811462e-d47f-445f-8124-b041c4c0935e\") " pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.611023 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qrhk\" (UniqueName: \"kubernetes.io/projected/9811462e-d47f-445f-8124-b041c4c0935e-kube-api-access-7qrhk\") pod \"redhat-operators-zhx2n\" (UID: \"9811462e-d47f-445f-8124-b041c4c0935e\") " pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.611401 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9811462e-d47f-445f-8124-b041c4c0935e-catalog-content\") pod \"redhat-operators-zhx2n\" (UID: \"9811462e-d47f-445f-8124-b041c4c0935e\") " pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.611431 4938 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9811462e-d47f-445f-8124-b041c4c0935e-utilities\") pod \"redhat-operators-zhx2n\" (UID: \"9811462e-d47f-445f-8124-b041c4c0935e\") " pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.631137 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qrhk\" (UniqueName: \"kubernetes.io/projected/9811462e-d47f-445f-8124-b041c4c0935e-kube-api-access-7qrhk\") pod \"redhat-operators-zhx2n\" (UID: \"9811462e-d47f-445f-8124-b041c4c0935e\") " pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:08 crc kubenswrapper[4938]: I1122 10:51:08.664841 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:09 crc kubenswrapper[4938]: I1122 10:51:09.106446 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zhx2n"] Nov 22 10:51:09 crc kubenswrapper[4938]: W1122 10:51:09.114413 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9811462e_d47f_445f_8124_b041c4c0935e.slice/crio-7d2b0ec11049b272726735b54c9c264441a0c1d0c6ebb4678839f10115cb163f WatchSource:0}: Error finding container 7d2b0ec11049b272726735b54c9c264441a0c1d0c6ebb4678839f10115cb163f: Status 404 returned error can't find the container with id 7d2b0ec11049b272726735b54c9c264441a0c1d0c6ebb4678839f10115cb163f Nov 22 10:51:10 crc kubenswrapper[4938]: I1122 10:51:10.001443 4938 generic.go:334] "Generic (PLEG): container finished" podID="9811462e-d47f-445f-8124-b041c4c0935e" containerID="857c7e0a6b9dd8e963c1049cc24fc06f5277a65ef97935cdcbd530faf02e58b8" exitCode=0 Nov 22 10:51:10 crc kubenswrapper[4938]: I1122 10:51:10.001512 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zhx2n" event={"ID":"9811462e-d47f-445f-8124-b041c4c0935e","Type":"ContainerDied","Data":"857c7e0a6b9dd8e963c1049cc24fc06f5277a65ef97935cdcbd530faf02e58b8"} Nov 22 10:51:10 crc kubenswrapper[4938]: I1122 10:51:10.001537 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zhx2n" event={"ID":"9811462e-d47f-445f-8124-b041c4c0935e","Type":"ContainerStarted","Data":"7d2b0ec11049b272726735b54c9c264441a0c1d0c6ebb4678839f10115cb163f"} Nov 22 10:51:10 crc kubenswrapper[4938]: I1122 10:51:10.003820 4938 generic.go:334] "Generic (PLEG): container finished" podID="645aab0f-dd96-4c5b-bb69-85d98c54bfe1" containerID="863359a51362d4364964a7f214f78d9a1e79e12e711d431e38c6c54a467f2d3c" exitCode=0 Nov 22 10:51:10 crc kubenswrapper[4938]: I1122 10:51:10.003850 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" event={"ID":"645aab0f-dd96-4c5b-bb69-85d98c54bfe1","Type":"ContainerDied","Data":"863359a51362d4364964a7f214f78d9a1e79e12e711d431e38c6c54a467f2d3c"} Nov 22 10:51:11 crc kubenswrapper[4938]: I1122 10:51:11.009790 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zhx2n" event={"ID":"9811462e-d47f-445f-8124-b041c4c0935e","Type":"ContainerStarted","Data":"017c5ffcf8059b77239e382353b6faaebccc9bf2f3df9516e889c3618d10e627"} Nov 22 10:51:11 crc kubenswrapper[4938]: I1122 10:51:11.013222 4938 generic.go:334] "Generic (PLEG): container finished" 
podID="645aab0f-dd96-4c5b-bb69-85d98c54bfe1" containerID="0bd2ab402907df84dd15d8b4f8aeaedbdd2fa7741b03dd10bdecbec14bb11c7b" exitCode=0 Nov 22 10:51:11 crc kubenswrapper[4938]: I1122 10:51:11.013263 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" event={"ID":"645aab0f-dd96-4c5b-bb69-85d98c54bfe1","Type":"ContainerDied","Data":"0bd2ab402907df84dd15d8b4f8aeaedbdd2fa7741b03dd10bdecbec14bb11c7b"} Nov 22 10:51:11 crc kubenswrapper[4938]: I1122 10:51:11.301269 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:51:11 crc kubenswrapper[4938]: I1122 10:51:11.301399 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:51:11 crc kubenswrapper[4938]: I1122 10:51:11.301487 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:51:11 crc kubenswrapper[4938]: I1122 10:51:11.302601 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d2b3abdbe6f3e506231efa45f5e8c99ca4e0148edc203fafb1a364658f241e4f"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 10:51:11 crc kubenswrapper[4938]: I1122 10:51:11.302696 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://d2b3abdbe6f3e506231efa45f5e8c99ca4e0148edc203fafb1a364658f241e4f" gracePeriod=600 Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.020645 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="d2b3abdbe6f3e506231efa45f5e8c99ca4e0148edc203fafb1a364658f241e4f" exitCode=0 Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.020705 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"d2b3abdbe6f3e506231efa45f5e8c99ca4e0148edc203fafb1a364658f241e4f"} Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.021939 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"9bf1ca56eb9b5ca54774b2ff22753d6d20a7c9a6e4ea3d50501b2ce9692054fe"} Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.022068 4938 scope.go:117] "RemoveContainer" containerID="9d04d2bb9c07a4ef07461763ab344623cc95c9776ecc97046a4862e181d472ec" Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.024416 4938 generic.go:334] "Generic (PLEG): container finished" podID="9811462e-d47f-445f-8124-b041c4c0935e" 
containerID="017c5ffcf8059b77239e382353b6faaebccc9bf2f3df9516e889c3618d10e627" exitCode=0 Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.024476 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zhx2n" event={"ID":"9811462e-d47f-445f-8124-b041c4c0935e","Type":"ContainerDied","Data":"017c5ffcf8059b77239e382353b6faaebccc9bf2f3df9516e889c3618d10e627"} Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.382183 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.471534 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-util\") pod \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\" (UID: \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\") " Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.471607 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b44xr\" (UniqueName: \"kubernetes.io/projected/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-kube-api-access-b44xr\") pod \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\" (UID: \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\") " Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.471632 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-bundle\") pod \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\" (UID: \"645aab0f-dd96-4c5b-bb69-85d98c54bfe1\") " Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.472268 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-bundle" (OuterVolumeSpecName: "bundle") pod "645aab0f-dd96-4c5b-bb69-85d98c54bfe1" (UID: "645aab0f-dd96-4c5b-bb69-85d98c54bfe1"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.476759 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-kube-api-access-b44xr" (OuterVolumeSpecName: "kube-api-access-b44xr") pod "645aab0f-dd96-4c5b-bb69-85d98c54bfe1" (UID: "645aab0f-dd96-4c5b-bb69-85d98c54bfe1"). InnerVolumeSpecName "kube-api-access-b44xr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.572748 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b44xr\" (UniqueName: \"kubernetes.io/projected/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-kube-api-access-b44xr\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.572789 4938 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.782404 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-util" (OuterVolumeSpecName: "util") pod "645aab0f-dd96-4c5b-bb69-85d98c54bfe1" (UID: "645aab0f-dd96-4c5b-bb69-85d98c54bfe1"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:51:12 crc kubenswrapper[4938]: I1122 10:51:12.877186 4938 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/645aab0f-dd96-4c5b-bb69-85d98c54bfe1-util\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:13 crc kubenswrapper[4938]: I1122 10:51:13.034483 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zhx2n" event={"ID":"9811462e-d47f-445f-8124-b041c4c0935e","Type":"ContainerStarted","Data":"d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26"} Nov 22 10:51:13 crc kubenswrapper[4938]: I1122 10:51:13.036960 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" event={"ID":"645aab0f-dd96-4c5b-bb69-85d98c54bfe1","Type":"ContainerDied","Data":"0e7b4528d38def343cb2e11e8d68ee7882e896ee693cb3eb4cc0bfe4a2e4c40d"} Nov 22 10:51:13 crc kubenswrapper[4938]: I1122 10:51:13.037023 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e7b4528d38def343cb2e11e8d68ee7882e896ee693cb3eb4cc0bfe4a2e4c40d" Nov 22 10:51:13 crc kubenswrapper[4938]: I1122 10:51:13.036985 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j" Nov 22 10:51:13 crc kubenswrapper[4938]: I1122 10:51:13.053119 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zhx2n" podStartSLOduration=2.545825363 podStartE2EDuration="5.053094989s" podCreationTimestamp="2025-11-22 10:51:08 +0000 UTC" firstStartedPulling="2025-11-22 10:51:10.003049589 +0000 UTC m=+802.470886988" lastFinishedPulling="2025-11-22 10:51:12.510319215 +0000 UTC m=+804.978156614" observedRunningTime="2025-11-22 10:51:13.052285928 +0000 UTC m=+805.520123347" watchObservedRunningTime="2025-11-22 10:51:13.053094989 +0000 UTC m=+805.520932398" Nov 22 10:51:16 crc kubenswrapper[4938]: I1122 10:51:16.936864 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-g9mdw"] Nov 22 10:51:16 crc kubenswrapper[4938]: E1122 10:51:16.937596 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="645aab0f-dd96-4c5b-bb69-85d98c54bfe1" containerName="util" Nov 22 10:51:16 crc kubenswrapper[4938]: I1122 10:51:16.937609 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="645aab0f-dd96-4c5b-bb69-85d98c54bfe1" containerName="util" Nov 22 10:51:16 crc kubenswrapper[4938]: E1122 10:51:16.937622 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="645aab0f-dd96-4c5b-bb69-85d98c54bfe1" containerName="pull" Nov 22 10:51:16 crc kubenswrapper[4938]: I1122 10:51:16.937627 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="645aab0f-dd96-4c5b-bb69-85d98c54bfe1" containerName="pull" Nov 22 10:51:16 crc kubenswrapper[4938]: E1122 10:51:16.937636 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="645aab0f-dd96-4c5b-bb69-85d98c54bfe1" containerName="extract" Nov 22 10:51:16 crc kubenswrapper[4938]: I1122 10:51:16.937643 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="645aab0f-dd96-4c5b-bb69-85d98c54bfe1" containerName="extract" Nov 22 10:51:16 crc kubenswrapper[4938]: I1122 10:51:16.937726 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="645aab0f-dd96-4c5b-bb69-85d98c54bfe1" 
containerName="extract" Nov 22 10:51:16 crc kubenswrapper[4938]: I1122 10:51:16.938090 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-g9mdw" Nov 22 10:51:16 crc kubenswrapper[4938]: I1122 10:51:16.939672 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 22 10:51:16 crc kubenswrapper[4938]: I1122 10:51:16.939974 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-xpxmw" Nov 22 10:51:16 crc kubenswrapper[4938]: I1122 10:51:16.940174 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 22 10:51:16 crc kubenswrapper[4938]: I1122 10:51:16.947659 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-g9mdw"] Nov 22 10:51:17 crc kubenswrapper[4938]: I1122 10:51:17.027979 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4d6c\" (UniqueName: \"kubernetes.io/projected/f899d59c-f579-4865-985d-d87c4fc54922-kube-api-access-t4d6c\") pod \"nmstate-operator-557fdffb88-g9mdw\" (UID: \"f899d59c-f579-4865-985d-d87c4fc54922\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-g9mdw" Nov 22 10:51:17 crc kubenswrapper[4938]: I1122 10:51:17.128896 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4d6c\" (UniqueName: \"kubernetes.io/projected/f899d59c-f579-4865-985d-d87c4fc54922-kube-api-access-t4d6c\") pod \"nmstate-operator-557fdffb88-g9mdw\" (UID: \"f899d59c-f579-4865-985d-d87c4fc54922\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-g9mdw" Nov 22 10:51:17 crc kubenswrapper[4938]: I1122 10:51:17.147030 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4d6c\" (UniqueName: \"kubernetes.io/projected/f899d59c-f579-4865-985d-d87c4fc54922-kube-api-access-t4d6c\") pod \"nmstate-operator-557fdffb88-g9mdw\" (UID: \"f899d59c-f579-4865-985d-d87c4fc54922\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-g9mdw" Nov 22 10:51:17 crc kubenswrapper[4938]: I1122 10:51:17.255351 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-g9mdw" Nov 22 10:51:17 crc kubenswrapper[4938]: I1122 10:51:17.652223 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-g9mdw"] Nov 22 10:51:18 crc kubenswrapper[4938]: I1122 10:51:18.073823 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-g9mdw" event={"ID":"f899d59c-f579-4865-985d-d87c4fc54922","Type":"ContainerStarted","Data":"1797e758d7af156f1c0029a9f5c53bf3243ce1a76568953c2ff2786014f020b6"} Nov 22 10:51:18 crc kubenswrapper[4938]: I1122 10:51:18.665594 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:18 crc kubenswrapper[4938]: I1122 10:51:18.665684 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:18 crc kubenswrapper[4938]: I1122 10:51:18.728082 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:19 crc kubenswrapper[4938]: I1122 10:51:19.124469 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:20 crc kubenswrapper[4938]: I1122 10:51:20.088651 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-g9mdw" event={"ID":"f899d59c-f579-4865-985d-d87c4fc54922","Type":"ContainerStarted","Data":"4af1e8212b40c0d7e9a66870fab92e088cf23877c16466a7cd935381d273f4d9"} Nov 22 10:51:20 crc kubenswrapper[4938]: I1122 10:51:20.107609 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-g9mdw" podStartSLOduration=2.06123507 podStartE2EDuration="4.107591395s" podCreationTimestamp="2025-11-22 10:51:16 +0000 UTC" firstStartedPulling="2025-11-22 10:51:17.665592887 +0000 UTC m=+810.133430276" lastFinishedPulling="2025-11-22 10:51:19.711949192 +0000 UTC m=+812.179786601" observedRunningTime="2025-11-22 10:51:20.101978394 +0000 UTC m=+812.569815793" watchObservedRunningTime="2025-11-22 10:51:20.107591395 +0000 UTC m=+812.575428794" Nov 22 10:51:21 crc kubenswrapper[4938]: I1122 10:51:21.136529 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zhx2n"] Nov 22 10:51:21 crc kubenswrapper[4938]: I1122 10:51:21.137075 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zhx2n" podUID="9811462e-d47f-445f-8124-b041c4c0935e" containerName="registry-server" containerID="cri-o://d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26" gracePeriod=2 Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.087389 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.102408 4938 generic.go:334] "Generic (PLEG): container finished" podID="9811462e-d47f-445f-8124-b041c4c0935e" containerID="d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26" exitCode=0 Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.102445 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zhx2n" event={"ID":"9811462e-d47f-445f-8124-b041c4c0935e","Type":"ContainerDied","Data":"d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26"} Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.102490 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zhx2n" event={"ID":"9811462e-d47f-445f-8124-b041c4c0935e","Type":"ContainerDied","Data":"7d2b0ec11049b272726735b54c9c264441a0c1d0c6ebb4678839f10115cb163f"} Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.102508 4938 scope.go:117] "RemoveContainer" containerID="d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.102628 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zhx2n" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.121066 4938 scope.go:117] "RemoveContainer" containerID="017c5ffcf8059b77239e382353b6faaebccc9bf2f3df9516e889c3618d10e627" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.137042 4938 scope.go:117] "RemoveContainer" containerID="857c7e0a6b9dd8e963c1049cc24fc06f5277a65ef97935cdcbd530faf02e58b8" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.152999 4938 scope.go:117] "RemoveContainer" containerID="d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26" Nov 22 10:51:22 crc kubenswrapper[4938]: E1122 10:51:22.153451 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26\": container with ID starting with d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26 not found: ID does not exist" containerID="d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.153479 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26"} err="failed to get container status \"d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26\": rpc error: code = NotFound desc = could not find container \"d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26\": container with ID starting with d65df720d672e6664a034e3cc40dfb06bdb376ba89856177085ff4e920c3fb26 not found: ID does not exist" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.153500 4938 scope.go:117] "RemoveContainer" containerID="017c5ffcf8059b77239e382353b6faaebccc9bf2f3df9516e889c3618d10e627" Nov 22 10:51:22 crc kubenswrapper[4938]: E1122 10:51:22.153886 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"017c5ffcf8059b77239e382353b6faaebccc9bf2f3df9516e889c3618d10e627\": container with ID starting with 017c5ffcf8059b77239e382353b6faaebccc9bf2f3df9516e889c3618d10e627 not found: ID does not exist" 
containerID="017c5ffcf8059b77239e382353b6faaebccc9bf2f3df9516e889c3618d10e627" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.153905 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"017c5ffcf8059b77239e382353b6faaebccc9bf2f3df9516e889c3618d10e627"} err="failed to get container status \"017c5ffcf8059b77239e382353b6faaebccc9bf2f3df9516e889c3618d10e627\": rpc error: code = NotFound desc = could not find container \"017c5ffcf8059b77239e382353b6faaebccc9bf2f3df9516e889c3618d10e627\": container with ID starting with 017c5ffcf8059b77239e382353b6faaebccc9bf2f3df9516e889c3618d10e627 not found: ID does not exist" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.153931 4938 scope.go:117] "RemoveContainer" containerID="857c7e0a6b9dd8e963c1049cc24fc06f5277a65ef97935cdcbd530faf02e58b8" Nov 22 10:51:22 crc kubenswrapper[4938]: E1122 10:51:22.154239 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"857c7e0a6b9dd8e963c1049cc24fc06f5277a65ef97935cdcbd530faf02e58b8\": container with ID starting with 857c7e0a6b9dd8e963c1049cc24fc06f5277a65ef97935cdcbd530faf02e58b8 not found: ID does not exist" containerID="857c7e0a6b9dd8e963c1049cc24fc06f5277a65ef97935cdcbd530faf02e58b8" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.154263 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"857c7e0a6b9dd8e963c1049cc24fc06f5277a65ef97935cdcbd530faf02e58b8"} err="failed to get container status \"857c7e0a6b9dd8e963c1049cc24fc06f5277a65ef97935cdcbd530faf02e58b8\": rpc error: code = NotFound desc = could not find container \"857c7e0a6b9dd8e963c1049cc24fc06f5277a65ef97935cdcbd530faf02e58b8\": container with ID starting with 857c7e0a6b9dd8e963c1049cc24fc06f5277a65ef97935cdcbd530faf02e58b8 not found: ID does not exist" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.289776 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9811462e-d47f-445f-8124-b041c4c0935e-utilities\") pod \"9811462e-d47f-445f-8124-b041c4c0935e\" (UID: \"9811462e-d47f-445f-8124-b041c4c0935e\") " Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.289827 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9811462e-d47f-445f-8124-b041c4c0935e-catalog-content\") pod \"9811462e-d47f-445f-8124-b041c4c0935e\" (UID: \"9811462e-d47f-445f-8124-b041c4c0935e\") " Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.289861 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qrhk\" (UniqueName: \"kubernetes.io/projected/9811462e-d47f-445f-8124-b041c4c0935e-kube-api-access-7qrhk\") pod \"9811462e-d47f-445f-8124-b041c4c0935e\" (UID: \"9811462e-d47f-445f-8124-b041c4c0935e\") " Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.290836 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9811462e-d47f-445f-8124-b041c4c0935e-utilities" (OuterVolumeSpecName: "utilities") pod "9811462e-d47f-445f-8124-b041c4c0935e" (UID: "9811462e-d47f-445f-8124-b041c4c0935e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.294484 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9811462e-d47f-445f-8124-b041c4c0935e-kube-api-access-7qrhk" (OuterVolumeSpecName: "kube-api-access-7qrhk") pod "9811462e-d47f-445f-8124-b041c4c0935e" (UID: "9811462e-d47f-445f-8124-b041c4c0935e"). InnerVolumeSpecName "kube-api-access-7qrhk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.391317 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9811462e-d47f-445f-8124-b041c4c0935e-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.391368 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qrhk\" (UniqueName: \"kubernetes.io/projected/9811462e-d47f-445f-8124-b041c4c0935e-kube-api-access-7qrhk\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.392509 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9811462e-d47f-445f-8124-b041c4c0935e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9811462e-d47f-445f-8124-b041c4c0935e" (UID: "9811462e-d47f-445f-8124-b041c4c0935e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.431472 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zhx2n"] Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.433973 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zhx2n"] Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.453667 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9811462e-d47f-445f-8124-b041c4c0935e" path="/var/lib/kubelet/pods/9811462e-d47f-445f-8124-b041c4c0935e/volumes" Nov 22 10:51:22 crc kubenswrapper[4938]: I1122 10:51:22.495619 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9811462e-d47f-445f-8124-b041c4c0935e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.844672 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-xfvp7"] Nov 22 10:51:26 crc kubenswrapper[4938]: E1122 10:51:26.845405 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9811462e-d47f-445f-8124-b041c4c0935e" containerName="extract-utilities" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.845422 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="9811462e-d47f-445f-8124-b041c4c0935e" containerName="extract-utilities" Nov 22 10:51:26 crc kubenswrapper[4938]: E1122 10:51:26.845436 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9811462e-d47f-445f-8124-b041c4c0935e" containerName="extract-content" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.845460 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="9811462e-d47f-445f-8124-b041c4c0935e" containerName="extract-content" Nov 22 10:51:26 crc kubenswrapper[4938]: E1122 10:51:26.845480 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9811462e-d47f-445f-8124-b041c4c0935e" containerName="registry-server" Nov 22 
10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.845487 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="9811462e-d47f-445f-8124-b041c4c0935e" containerName="registry-server" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.845637 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="9811462e-d47f-445f-8124-b041c4c0935e" containerName="registry-server" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.846452 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-xfvp7" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.851028 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-g28vz" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.855644 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj"] Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.856485 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.859221 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.891961 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj"] Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.894528 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cg4pn\" (UniqueName: \"kubernetes.io/projected/475ad90f-3054-4344-885d-9fe424557efd-kube-api-access-cg4pn\") pod \"nmstate-metrics-5dcf9c57c5-xfvp7\" (UID: \"475ad90f-3054-4344-885d-9fe424557efd\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-xfvp7" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.895829 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-jhvqn"] Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.896680 4938 util.go:30] "No sandbox for pod can be found. 
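[annotation] The cpu_manager / memory_manager "RemoveStaleState" records above are the resource managers sweeping per-container assignments whose pod (here the deleted redhat-operators-zhx2n catalog pod) is no longer active. A minimal Go sketch of that sweep pattern; the map shapes and names are assumptions for illustration, not kubelet internals:

    package main

    import "fmt"

    // removeStaleState drops every per-container assignment whose pod is no
    // longer in the active set, mirroring the log records above.
    func removeStaleState(assignments map[string]map[string]string, activePods map[string]bool) {
    	for podUID, containers := range assignments {
    		if activePods[podUID] {
    			continue
    		}
    		for name := range containers {
    			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", podUID, name)
    		}
    		delete(assignments, podUID) // deleting during range is safe in Go
    	}
    }

    func main() {
    	a := map[string]map[string]string{
    		"9811462e-d47f-445f-8124-b041c4c0935e": {
    			"extract-utilities": "0-1",
    			"registry-server":   "2-3",
    		},
    	}
    	removeStaleState(a, map[string]bool{}) // pod deleted: entries swept
    	fmt.Println(len(a))                    // 0
    }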
Need to start a new one" pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.920265 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-xfvp7"] Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.997520 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqmv6\" (UniqueName: \"kubernetes.io/projected/32b53aaa-95c2-4834-a57d-955709a2e992-kube-api-access-wqmv6\") pod \"nmstate-webhook-6b89b748d8-jzqlj\" (UID: \"32b53aaa-95c2-4834-a57d-955709a2e992\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.997588 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cg4pn\" (UniqueName: \"kubernetes.io/projected/475ad90f-3054-4344-885d-9fe424557efd-kube-api-access-cg4pn\") pod \"nmstate-metrics-5dcf9c57c5-xfvp7\" (UID: \"475ad90f-3054-4344-885d-9fe424557efd\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-xfvp7" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.997618 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/286573ca-1f3c-4af4-8bfb-ba8b52224082-dbus-socket\") pod \"nmstate-handler-jhvqn\" (UID: \"286573ca-1f3c-4af4-8bfb-ba8b52224082\") " pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.997652 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/286573ca-1f3c-4af4-8bfb-ba8b52224082-ovs-socket\") pod \"nmstate-handler-jhvqn\" (UID: \"286573ca-1f3c-4af4-8bfb-ba8b52224082\") " pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.997674 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/32b53aaa-95c2-4834-a57d-955709a2e992-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-jzqlj\" (UID: \"32b53aaa-95c2-4834-a57d-955709a2e992\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.997703 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ks4w9\" (UniqueName: \"kubernetes.io/projected/286573ca-1f3c-4af4-8bfb-ba8b52224082-kube-api-access-ks4w9\") pod \"nmstate-handler-jhvqn\" (UID: \"286573ca-1f3c-4af4-8bfb-ba8b52224082\") " pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:26 crc kubenswrapper[4938]: I1122 10:51:26.997736 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/286573ca-1f3c-4af4-8bfb-ba8b52224082-nmstate-lock\") pod \"nmstate-handler-jhvqn\" (UID: \"286573ca-1f3c-4af4-8bfb-ba8b52224082\") " pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.009675 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52"] Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.010369 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.017253 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-n7tlk" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.017394 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.017477 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.044325 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cg4pn\" (UniqueName: \"kubernetes.io/projected/475ad90f-3054-4344-885d-9fe424557efd-kube-api-access-cg4pn\") pod \"nmstate-metrics-5dcf9c57c5-xfvp7\" (UID: \"475ad90f-3054-4344-885d-9fe424557efd\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-xfvp7" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.084629 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52"] Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.100025 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/286573ca-1f3c-4af4-8bfb-ba8b52224082-ovs-socket\") pod \"nmstate-handler-jhvqn\" (UID: \"286573ca-1f3c-4af4-8bfb-ba8b52224082\") " pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.100064 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/32b53aaa-95c2-4834-a57d-955709a2e992-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-jzqlj\" (UID: \"32b53aaa-95c2-4834-a57d-955709a2e992\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.100095 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6d740b57-09d5-44f9-90c2-bf2cfeb44311-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-msh52\" (UID: \"6d740b57-09d5-44f9-90c2-bf2cfeb44311\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.100128 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ks4w9\" (UniqueName: \"kubernetes.io/projected/286573ca-1f3c-4af4-8bfb-ba8b52224082-kube-api-access-ks4w9\") pod \"nmstate-handler-jhvqn\" (UID: \"286573ca-1f3c-4af4-8bfb-ba8b52224082\") " pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.100154 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9z9b\" (UniqueName: \"kubernetes.io/projected/6d740b57-09d5-44f9-90c2-bf2cfeb44311-kube-api-access-l9z9b\") pod \"nmstate-console-plugin-5874bd7bc5-msh52\" (UID: \"6d740b57-09d5-44f9-90c2-bf2cfeb44311\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.100184 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/286573ca-1f3c-4af4-8bfb-ba8b52224082-nmstate-lock\") pod \"nmstate-handler-jhvqn\" (UID: 
\"286573ca-1f3c-4af4-8bfb-ba8b52224082\") " pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.100209 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6d740b57-09d5-44f9-90c2-bf2cfeb44311-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-msh52\" (UID: \"6d740b57-09d5-44f9-90c2-bf2cfeb44311\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.100234 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqmv6\" (UniqueName: \"kubernetes.io/projected/32b53aaa-95c2-4834-a57d-955709a2e992-kube-api-access-wqmv6\") pod \"nmstate-webhook-6b89b748d8-jzqlj\" (UID: \"32b53aaa-95c2-4834-a57d-955709a2e992\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.100254 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/286573ca-1f3c-4af4-8bfb-ba8b52224082-dbus-socket\") pod \"nmstate-handler-jhvqn\" (UID: \"286573ca-1f3c-4af4-8bfb-ba8b52224082\") " pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.100568 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/286573ca-1f3c-4af4-8bfb-ba8b52224082-dbus-socket\") pod \"nmstate-handler-jhvqn\" (UID: \"286573ca-1f3c-4af4-8bfb-ba8b52224082\") " pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.100605 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/286573ca-1f3c-4af4-8bfb-ba8b52224082-ovs-socket\") pod \"nmstate-handler-jhvqn\" (UID: \"286573ca-1f3c-4af4-8bfb-ba8b52224082\") " pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:27 crc kubenswrapper[4938]: E1122 10:51:27.100674 4938 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 22 10:51:27 crc kubenswrapper[4938]: E1122 10:51:27.100716 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/32b53aaa-95c2-4834-a57d-955709a2e992-tls-key-pair podName:32b53aaa-95c2-4834-a57d-955709a2e992 nodeName:}" failed. No retries permitted until 2025-11-22 10:51:27.600699852 +0000 UTC m=+820.068537251 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/32b53aaa-95c2-4834-a57d-955709a2e992-tls-key-pair") pod "nmstate-webhook-6b89b748d8-jzqlj" (UID: "32b53aaa-95c2-4834-a57d-955709a2e992") : secret "openshift-nmstate-webhook" not found Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.101066 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/286573ca-1f3c-4af4-8bfb-ba8b52224082-nmstate-lock\") pod \"nmstate-handler-jhvqn\" (UID: \"286573ca-1f3c-4af4-8bfb-ba8b52224082\") " pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.144404 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqmv6\" (UniqueName: \"kubernetes.io/projected/32b53aaa-95c2-4834-a57d-955709a2e992-kube-api-access-wqmv6\") pod \"nmstate-webhook-6b89b748d8-jzqlj\" (UID: \"32b53aaa-95c2-4834-a57d-955709a2e992\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.159144 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ks4w9\" (UniqueName: \"kubernetes.io/projected/286573ca-1f3c-4af4-8bfb-ba8b52224082-kube-api-access-ks4w9\") pod \"nmstate-handler-jhvqn\" (UID: \"286573ca-1f3c-4af4-8bfb-ba8b52224082\") " pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.175361 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-xfvp7" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.201510 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6d740b57-09d5-44f9-90c2-bf2cfeb44311-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-msh52\" (UID: \"6d740b57-09d5-44f9-90c2-bf2cfeb44311\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.201653 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9z9b\" (UniqueName: \"kubernetes.io/projected/6d740b57-09d5-44f9-90c2-bf2cfeb44311-kube-api-access-l9z9b\") pod \"nmstate-console-plugin-5874bd7bc5-msh52\" (UID: \"6d740b57-09d5-44f9-90c2-bf2cfeb44311\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.202342 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6d740b57-09d5-44f9-90c2-bf2cfeb44311-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-msh52\" (UID: \"6d740b57-09d5-44f9-90c2-bf2cfeb44311\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" Nov 22 10:51:27 crc kubenswrapper[4938]: E1122 10:51:27.202577 4938 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 22 10:51:27 crc kubenswrapper[4938]: E1122 10:51:27.202642 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d740b57-09d5-44f9-90c2-bf2cfeb44311-plugin-serving-cert podName:6d740b57-09d5-44f9-90c2-bf2cfeb44311 nodeName:}" failed. No retries permitted until 2025-11-22 10:51:27.702623509 +0000 UTC m=+820.170460908 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/6d740b57-09d5-44f9-90c2-bf2cfeb44311-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-msh52" (UID: "6d740b57-09d5-44f9-90c2-bf2cfeb44311") : secret "plugin-serving-cert" not found Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.203295 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6d740b57-09d5-44f9-90c2-bf2cfeb44311-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-msh52\" (UID: \"6d740b57-09d5-44f9-90c2-bf2cfeb44311\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.221364 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.238035 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9z9b\" (UniqueName: \"kubernetes.io/projected/6d740b57-09d5-44f9-90c2-bf2cfeb44311-kube-api-access-l9z9b\") pod \"nmstate-console-plugin-5874bd7bc5-msh52\" (UID: \"6d740b57-09d5-44f9-90c2-bf2cfeb44311\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.252584 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-557bd66844-bmw42"] Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.253297 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.259396 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-557bd66844-bmw42"] Nov 22 10:51:27 crc kubenswrapper[4938]: W1122 10:51:27.291865 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod286573ca_1f3c_4af4_8bfb_ba8b52224082.slice/crio-96792c7e3f8a1f7079317ed7927318c63e9f39ea01c835c482e0082138cd44f0 WatchSource:0}: Error finding container 96792c7e3f8a1f7079317ed7927318c63e9f39ea01c835c482e0082138cd44f0: Status 404 returned error can't find the container with id 96792c7e3f8a1f7079317ed7927318c63e9f39ea01c835c482e0082138cd44f0 Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.304275 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27vbx\" (UniqueName: \"kubernetes.io/projected/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-kube-api-access-27vbx\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.305164 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-trusted-ca-bundle\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.305192 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-console-serving-cert\") pod \"console-557bd66844-bmw42\" (UID: 
\"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.305231 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-service-ca\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.305281 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-console-oauth-config\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.305312 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-console-config\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.305343 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-oauth-serving-cert\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.405982 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-service-ca\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.406107 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-console-oauth-config\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.406141 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-console-config\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.407006 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-console-config\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.406963 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-oauth-serving-cert\") pod 
\"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.407006 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-service-ca\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.407072 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-oauth-serving-cert\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.407177 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27vbx\" (UniqueName: \"kubernetes.io/projected/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-kube-api-access-27vbx\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.407215 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-trusted-ca-bundle\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.407233 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-console-serving-cert\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.409549 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-trusted-ca-bundle\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.410808 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-console-oauth-config\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.411028 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-console-serving-cert\") pod \"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.423683 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27vbx\" (UniqueName: \"kubernetes.io/projected/ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d-kube-api-access-27vbx\") pod 
\"console-557bd66844-bmw42\" (UID: \"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d\") " pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.574542 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.609372 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/32b53aaa-95c2-4834-a57d-955709a2e992-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-jzqlj\" (UID: \"32b53aaa-95c2-4834-a57d-955709a2e992\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.616158 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/32b53aaa-95c2-4834-a57d-955709a2e992-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-jzqlj\" (UID: \"32b53aaa-95c2-4834-a57d-955709a2e992\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.633546 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-xfvp7"] Nov 22 10:51:27 crc kubenswrapper[4938]: W1122 10:51:27.641692 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod475ad90f_3054_4344_885d_9fe424557efd.slice/crio-ad940219efc4a673352547c3bc7a427e7c36467583056bfc6a3d31c801d5f8c7 WatchSource:0}: Error finding container ad940219efc4a673352547c3bc7a427e7c36467583056bfc6a3d31c801d5f8c7: Status 404 returned error can't find the container with id ad940219efc4a673352547c3bc7a427e7c36467583056bfc6a3d31c801d5f8c7 Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.710562 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6d740b57-09d5-44f9-90c2-bf2cfeb44311-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-msh52\" (UID: \"6d740b57-09d5-44f9-90c2-bf2cfeb44311\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.715303 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6d740b57-09d5-44f9-90c2-bf2cfeb44311-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-msh52\" (UID: \"6d740b57-09d5-44f9-90c2-bf2cfeb44311\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.789670 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" Nov 22 10:51:27 crc kubenswrapper[4938]: I1122 10:51:27.932949 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" Nov 22 10:51:28 crc kubenswrapper[4938]: I1122 10:51:28.016141 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-557bd66844-bmw42"] Nov 22 10:51:28 crc kubenswrapper[4938]: W1122 10:51:28.024296 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab0c61b2_0d95_4c26_8dbe_1e9dd62ba79d.slice/crio-2a6011fa284f084bb04b931c237228e5fbad890ade69bec2ef476184d7db97e9 WatchSource:0}: Error finding container 2a6011fa284f084bb04b931c237228e5fbad890ade69bec2ef476184d7db97e9: Status 404 returned error can't find the container with id 2a6011fa284f084bb04b931c237228e5fbad890ade69bec2ef476184d7db97e9 Nov 22 10:51:28 crc kubenswrapper[4938]: I1122 10:51:28.162239 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj"] Nov 22 10:51:28 crc kubenswrapper[4938]: I1122 10:51:28.180710 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" event={"ID":"32b53aaa-95c2-4834-a57d-955709a2e992","Type":"ContainerStarted","Data":"bef7db4030eb2e00346bd36cd8888a736ba9701abab4e1f9b51360e70abf85af"} Nov 22 10:51:28 crc kubenswrapper[4938]: I1122 10:51:28.181959 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-jhvqn" event={"ID":"286573ca-1f3c-4af4-8bfb-ba8b52224082","Type":"ContainerStarted","Data":"96792c7e3f8a1f7079317ed7927318c63e9f39ea01c835c482e0082138cd44f0"} Nov 22 10:51:28 crc kubenswrapper[4938]: I1122 10:51:28.183439 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-557bd66844-bmw42" event={"ID":"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d","Type":"ContainerStarted","Data":"2a6011fa284f084bb04b931c237228e5fbad890ade69bec2ef476184d7db97e9"} Nov 22 10:51:28 crc kubenswrapper[4938]: I1122 10:51:28.186426 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-xfvp7" event={"ID":"475ad90f-3054-4344-885d-9fe424557efd","Type":"ContainerStarted","Data":"ad940219efc4a673352547c3bc7a427e7c36467583056bfc6a3d31c801d5f8c7"} Nov 22 10:51:28 crc kubenswrapper[4938]: I1122 10:51:28.311419 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52"] Nov 22 10:51:28 crc kubenswrapper[4938]: W1122 10:51:28.317848 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d740b57_09d5_44f9_90c2_bf2cfeb44311.slice/crio-4e8b84c667067b5f684606f7f0718fb9bc296f84d48ed3827869bb70cd70ad83 WatchSource:0}: Error finding container 4e8b84c667067b5f684606f7f0718fb9bc296f84d48ed3827869bb70cd70ad83: Status 404 returned error can't find the container with id 4e8b84c667067b5f684606f7f0718fb9bc296f84d48ed3827869bb70cd70ad83 Nov 22 10:51:29 crc kubenswrapper[4938]: I1122 10:51:29.192272 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" event={"ID":"6d740b57-09d5-44f9-90c2-bf2cfeb44311","Type":"ContainerStarted","Data":"4e8b84c667067b5f684606f7f0718fb9bc296f84d48ed3827869bb70cd70ad83"} Nov 22 10:51:29 crc kubenswrapper[4938]: I1122 10:51:29.193465 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-557bd66844-bmw42" 
event={"ID":"ab0c61b2-0d95-4c26-8dbe-1e9dd62ba79d","Type":"ContainerStarted","Data":"af30cfc0ff8a2e79fb11b171d25d058f55b017da62d95be1a7011faff9c35c68"} Nov 22 10:51:29 crc kubenswrapper[4938]: I1122 10:51:29.211759 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-557bd66844-bmw42" podStartSLOduration=2.21172331 podStartE2EDuration="2.21172331s" podCreationTimestamp="2025-11-22 10:51:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:51:29.20811185 +0000 UTC m=+821.675949249" watchObservedRunningTime="2025-11-22 10:51:29.21172331 +0000 UTC m=+821.679560699" Nov 22 10:51:30 crc kubenswrapper[4938]: I1122 10:51:30.201224 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-xfvp7" event={"ID":"475ad90f-3054-4344-885d-9fe424557efd","Type":"ContainerStarted","Data":"d4b108112aeb426f5805c107a5399e442ba417da17051184ba28b820e1a5fe03"} Nov 22 10:51:30 crc kubenswrapper[4938]: I1122 10:51:30.203290 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" event={"ID":"32b53aaa-95c2-4834-a57d-955709a2e992","Type":"ContainerStarted","Data":"c8d4e2938be67a7a354e79423c3020146002643d9b0d9e65af496ee7975a9360"} Nov 22 10:51:30 crc kubenswrapper[4938]: I1122 10:51:30.204140 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" Nov 22 10:51:30 crc kubenswrapper[4938]: I1122 10:51:30.205395 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-jhvqn" event={"ID":"286573ca-1f3c-4af4-8bfb-ba8b52224082","Type":"ContainerStarted","Data":"8f025a79b24b9dd0c9df72f81b6e450b76d66d588941e0d143971dd9776f526e"} Nov 22 10:51:30 crc kubenswrapper[4938]: I1122 10:51:30.205522 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:30 crc kubenswrapper[4938]: I1122 10:51:30.223419 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" podStartSLOduration=2.64078792 podStartE2EDuration="4.223404645s" podCreationTimestamp="2025-11-22 10:51:26 +0000 UTC" firstStartedPulling="2025-11-22 10:51:28.166835442 +0000 UTC m=+820.634672841" lastFinishedPulling="2025-11-22 10:51:29.749452167 +0000 UTC m=+822.217289566" observedRunningTime="2025-11-22 10:51:30.2196397 +0000 UTC m=+822.687477099" watchObservedRunningTime="2025-11-22 10:51:30.223404645 +0000 UTC m=+822.691242044" Nov 22 10:51:30 crc kubenswrapper[4938]: I1122 10:51:30.237704 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-jhvqn" podStartSLOduration=1.802632818 podStartE2EDuration="4.237685453s" podCreationTimestamp="2025-11-22 10:51:26 +0000 UTC" firstStartedPulling="2025-11-22 10:51:27.298034592 +0000 UTC m=+819.765871991" lastFinishedPulling="2025-11-22 10:51:29.733087227 +0000 UTC m=+822.200924626" observedRunningTime="2025-11-22 10:51:30.234702858 +0000 UTC m=+822.702540257" watchObservedRunningTime="2025-11-22 10:51:30.237685453 +0000 UTC m=+822.705522852" Nov 22 10:51:31 crc kubenswrapper[4938]: I1122 10:51:31.211676 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" 
event={"ID":"6d740b57-09d5-44f9-90c2-bf2cfeb44311","Type":"ContainerStarted","Data":"2209a4dd0034bd0caaaab393e844da6b3b8aa1143bdfaf85ae38965782a1378c"} Nov 22 10:51:31 crc kubenswrapper[4938]: I1122 10:51:31.228365 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-msh52" podStartSLOduration=2.87246803 podStartE2EDuration="5.228348529s" podCreationTimestamp="2025-11-22 10:51:26 +0000 UTC" firstStartedPulling="2025-11-22 10:51:28.319881201 +0000 UTC m=+820.787718600" lastFinishedPulling="2025-11-22 10:51:30.6757617 +0000 UTC m=+823.143599099" observedRunningTime="2025-11-22 10:51:31.227222241 +0000 UTC m=+823.695059640" watchObservedRunningTime="2025-11-22 10:51:31.228348529 +0000 UTC m=+823.696185928" Nov 22 10:51:32 crc kubenswrapper[4938]: I1122 10:51:32.219504 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-xfvp7" event={"ID":"475ad90f-3054-4344-885d-9fe424557efd","Type":"ContainerStarted","Data":"239ca0ea6dddc46bdf7cac929b3a7b9833b6cb608ea42f7acec28d3d54856378"} Nov 22 10:51:37 crc kubenswrapper[4938]: I1122 10:51:37.247962 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-jhvqn" Nov 22 10:51:37 crc kubenswrapper[4938]: I1122 10:51:37.268815 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-xfvp7" podStartSLOduration=7.176949643 podStartE2EDuration="11.268795153s" podCreationTimestamp="2025-11-22 10:51:26 +0000 UTC" firstStartedPulling="2025-11-22 10:51:27.646205164 +0000 UTC m=+820.114042563" lastFinishedPulling="2025-11-22 10:51:31.738050674 +0000 UTC m=+824.205888073" observedRunningTime="2025-11-22 10:51:32.243399708 +0000 UTC m=+824.711237127" watchObservedRunningTime="2025-11-22 10:51:37.268795153 +0000 UTC m=+829.736632562" Nov 22 10:51:37 crc kubenswrapper[4938]: I1122 10:51:37.574986 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:37 crc kubenswrapper[4938]: I1122 10:51:37.575252 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:37 crc kubenswrapper[4938]: I1122 10:51:37.581309 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:38 crc kubenswrapper[4938]: I1122 10:51:38.260830 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-557bd66844-bmw42" Nov 22 10:51:38 crc kubenswrapper[4938]: I1122 10:51:38.333779 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-rl6xd"] Nov 22 10:51:47 crc kubenswrapper[4938]: I1122 10:51:47.797439 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-jzqlj" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.212942 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs"] Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.214674 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.216468 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.223106 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs"] Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.377589 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs\" (UID: \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.378223 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scn8r\" (UniqueName: \"kubernetes.io/projected/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-kube-api-access-scn8r\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs\" (UID: \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.378314 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs\" (UID: \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.479071 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scn8r\" (UniqueName: \"kubernetes.io/projected/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-kube-api-access-scn8r\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs\" (UID: \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.479360 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs\" (UID: \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.479475 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs\" (UID: \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.480205 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs\" (UID: \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.480379 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs\" (UID: \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.498416 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scn8r\" (UniqueName: \"kubernetes.io/projected/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-kube-api-access-scn8r\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs\" (UID: \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.531117 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:02 crc kubenswrapper[4938]: I1122 10:52:02.938255 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs"] Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.377268 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-rl6xd" podUID="4461eec4-354b-417f-b8ae-24e3deed3a5a" containerName="console" containerID="cri-o://1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb" gracePeriod=15 Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.409110 4938 generic.go:334] "Generic (PLEG): container finished" podID="6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" containerID="ddde20bf58f57489502d2df0364b28c5838857091c26e49e712843ae8650f76f" exitCode=0 Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.409175 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" event={"ID":"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7","Type":"ContainerDied","Data":"ddde20bf58f57489502d2df0364b28c5838857091c26e49e712843ae8650f76f"} Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.409221 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" event={"ID":"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7","Type":"ContainerStarted","Data":"6694075e07c08bfdd073f2fa2aef7bc0e0c390fd25fcc935ffe11c3ec9e42bab"} Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.710039 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-rl6xd_4461eec4-354b-417f-b8ae-24e3deed3a5a/console/0.log" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.710286 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.796379 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-service-ca\") pod \"4461eec4-354b-417f-b8ae-24e3deed3a5a\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.796432 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-config\") pod \"4461eec4-354b-417f-b8ae-24e3deed3a5a\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.796448 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-trusted-ca-bundle\") pod \"4461eec4-354b-417f-b8ae-24e3deed3a5a\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.796493 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-oauth-serving-cert\") pod \"4461eec4-354b-417f-b8ae-24e3deed3a5a\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.796510 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqxmh\" (UniqueName: \"kubernetes.io/projected/4461eec4-354b-417f-b8ae-24e3deed3a5a-kube-api-access-pqxmh\") pod \"4461eec4-354b-417f-b8ae-24e3deed3a5a\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.796532 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-serving-cert\") pod \"4461eec4-354b-417f-b8ae-24e3deed3a5a\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.796548 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-oauth-config\") pod \"4461eec4-354b-417f-b8ae-24e3deed3a5a\" (UID: \"4461eec4-354b-417f-b8ae-24e3deed3a5a\") " Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.797533 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "4461eec4-354b-417f-b8ae-24e3deed3a5a" (UID: "4461eec4-354b-417f-b8ae-24e3deed3a5a"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.797598 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-config" (OuterVolumeSpecName: "console-config") pod "4461eec4-354b-417f-b8ae-24e3deed3a5a" (UID: "4461eec4-354b-417f-b8ae-24e3deed3a5a"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.798125 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "4461eec4-354b-417f-b8ae-24e3deed3a5a" (UID: "4461eec4-354b-417f-b8ae-24e3deed3a5a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.798171 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-service-ca" (OuterVolumeSpecName: "service-ca") pod "4461eec4-354b-417f-b8ae-24e3deed3a5a" (UID: "4461eec4-354b-417f-b8ae-24e3deed3a5a"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.803096 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4461eec4-354b-417f-b8ae-24e3deed3a5a-kube-api-access-pqxmh" (OuterVolumeSpecName: "kube-api-access-pqxmh") pod "4461eec4-354b-417f-b8ae-24e3deed3a5a" (UID: "4461eec4-354b-417f-b8ae-24e3deed3a5a"). InnerVolumeSpecName "kube-api-access-pqxmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.804049 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "4461eec4-354b-417f-b8ae-24e3deed3a5a" (UID: "4461eec4-354b-417f-b8ae-24e3deed3a5a"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.808960 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "4461eec4-354b-417f-b8ae-24e3deed3a5a" (UID: "4461eec4-354b-417f-b8ae-24e3deed3a5a"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.897791 4938 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.897826 4938 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.897836 4938 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.897845 4938 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4461eec4-354b-417f-b8ae-24e3deed3a5a-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.897856 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqxmh\" (UniqueName: \"kubernetes.io/projected/4461eec4-354b-417f-b8ae-24e3deed3a5a-kube-api-access-pqxmh\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.897864 4938 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:03 crc kubenswrapper[4938]: I1122 10:52:03.897871 4938 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4461eec4-354b-417f-b8ae-24e3deed3a5a-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:04 crc kubenswrapper[4938]: I1122 10:52:04.417272 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-rl6xd_4461eec4-354b-417f-b8ae-24e3deed3a5a/console/0.log" Nov 22 10:52:04 crc kubenswrapper[4938]: I1122 10:52:04.417323 4938 generic.go:334] "Generic (PLEG): container finished" podID="4461eec4-354b-417f-b8ae-24e3deed3a5a" containerID="1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb" exitCode=2 Nov 22 10:52:04 crc kubenswrapper[4938]: I1122 10:52:04.417353 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-rl6xd" event={"ID":"4461eec4-354b-417f-b8ae-24e3deed3a5a","Type":"ContainerDied","Data":"1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb"} Nov 22 10:52:04 crc kubenswrapper[4938]: I1122 10:52:04.417379 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-rl6xd" event={"ID":"4461eec4-354b-417f-b8ae-24e3deed3a5a","Type":"ContainerDied","Data":"051d817eff1df65881caefa8936d42e3b180e592073f94727b816037a5dc3512"} Nov 22 10:52:04 crc kubenswrapper[4938]: I1122 10:52:04.417396 4938 scope.go:117] "RemoveContainer" containerID="1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb" Nov 22 10:52:04 crc kubenswrapper[4938]: I1122 10:52:04.417510 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-rl6xd" Nov 22 10:52:04 crc kubenswrapper[4938]: I1122 10:52:04.437511 4938 scope.go:117] "RemoveContainer" containerID="1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb" Nov 22 10:52:04 crc kubenswrapper[4938]: E1122 10:52:04.438205 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb\": container with ID starting with 1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb not found: ID does not exist" containerID="1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb" Nov 22 10:52:04 crc kubenswrapper[4938]: I1122 10:52:04.438246 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb"} err="failed to get container status \"1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb\": rpc error: code = NotFound desc = could not find container \"1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb\": container with ID starting with 1b9eee81afb79a19eea1bfeaf4ed1a5f8b43b254e7d357049d6c7dd856f2dceb not found: ID does not exist" Nov 22 10:52:04 crc kubenswrapper[4938]: I1122 10:52:04.457633 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-rl6xd"] Nov 22 10:52:04 crc kubenswrapper[4938]: I1122 10:52:04.457677 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-rl6xd"] Nov 22 10:52:05 crc kubenswrapper[4938]: I1122 10:52:05.423885 4938 generic.go:334] "Generic (PLEG): container finished" podID="6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" containerID="707e406fc168b80f1349afe3e8cc82196fba9eed4210e4fe7ad6f1a8988f7ce5" exitCode=0 Nov 22 10:52:05 crc kubenswrapper[4938]: I1122 10:52:05.424963 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" event={"ID":"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7","Type":"ContainerDied","Data":"707e406fc168b80f1349afe3e8cc82196fba9eed4210e4fe7ad6f1a8988f7ce5"} Nov 22 10:52:06 crc kubenswrapper[4938]: I1122 10:52:06.435893 4938 generic.go:334] "Generic (PLEG): container finished" podID="6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" containerID="5340aa9a8afec7de57ccdba67cd5f34395874a693e2cad13e89d6e6eb7ffe305" exitCode=0 Nov 22 10:52:06 crc kubenswrapper[4938]: I1122 10:52:06.436004 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" event={"ID":"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7","Type":"ContainerDied","Data":"5340aa9a8afec7de57ccdba67cd5f34395874a693e2cad13e89d6e6eb7ffe305"} Nov 22 10:52:06 crc kubenswrapper[4938]: I1122 10:52:06.475075 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4461eec4-354b-417f-b8ae-24e3deed3a5a" path="/var/lib/kubelet/pods/4461eec4-354b-417f-b8ae-24e3deed3a5a/volumes" Nov 22 10:52:07 crc kubenswrapper[4938]: I1122 10:52:07.809230 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:07 crc kubenswrapper[4938]: I1122 10:52:07.950900 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scn8r\" (UniqueName: \"kubernetes.io/projected/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-kube-api-access-scn8r\") pod \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\" (UID: \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\") " Nov 22 10:52:07 crc kubenswrapper[4938]: I1122 10:52:07.951802 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-bundle\") pod \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\" (UID: \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\") " Nov 22 10:52:07 crc kubenswrapper[4938]: I1122 10:52:07.951857 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-util\") pod \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\" (UID: \"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7\") " Nov 22 10:52:07 crc kubenswrapper[4938]: I1122 10:52:07.952605 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-bundle" (OuterVolumeSpecName: "bundle") pod "6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" (UID: "6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:52:07 crc kubenswrapper[4938]: I1122 10:52:07.959865 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-kube-api-access-scn8r" (OuterVolumeSpecName: "kube-api-access-scn8r") pod "6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" (UID: "6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7"). InnerVolumeSpecName "kube-api-access-scn8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:52:07 crc kubenswrapper[4938]: I1122 10:52:07.965251 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-util" (OuterVolumeSpecName: "util") pod "6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" (UID: "6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:52:08 crc kubenswrapper[4938]: I1122 10:52:08.054210 4938 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:08 crc kubenswrapper[4938]: I1122 10:52:08.054250 4938 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-util\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:08 crc kubenswrapper[4938]: I1122 10:52:08.054264 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scn8r\" (UniqueName: \"kubernetes.io/projected/6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7-kube-api-access-scn8r\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:08 crc kubenswrapper[4938]: I1122 10:52:08.452865 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" Nov 22 10:52:08 crc kubenswrapper[4938]: I1122 10:52:08.456779 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs" event={"ID":"6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7","Type":"ContainerDied","Data":"6694075e07c08bfdd073f2fa2aef7bc0e0c390fd25fcc935ffe11c3ec9e42bab"} Nov 22 10:52:08 crc kubenswrapper[4938]: I1122 10:52:08.456826 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6694075e07c08bfdd073f2fa2aef7bc0e0c390fd25fcc935ffe11c3ec9e42bab" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.369114 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8"] Nov 22 10:52:18 crc kubenswrapper[4938]: E1122 10:52:18.369798 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" containerName="util" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.369810 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" containerName="util" Nov 22 10:52:18 crc kubenswrapper[4938]: E1122 10:52:18.369820 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4461eec4-354b-417f-b8ae-24e3deed3a5a" containerName="console" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.369825 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="4461eec4-354b-417f-b8ae-24e3deed3a5a" containerName="console" Nov 22 10:52:18 crc kubenswrapper[4938]: E1122 10:52:18.369842 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" containerName="extract" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.369848 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" containerName="extract" Nov 22 10:52:18 crc kubenswrapper[4938]: E1122 10:52:18.369856 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" containerName="pull" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.369861 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" containerName="pull" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.369971 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="4461eec4-354b-417f-b8ae-24e3deed3a5a" containerName="console" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.369986 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7" containerName="extract" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.370346 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.372249 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.375216 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.375351 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.375451 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.376525 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-5vjx7" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.390729 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8"] Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.472668 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4k8v\" (UniqueName: \"kubernetes.io/projected/0ff86a67-1bc4-4f45-82ae-cd10727037d6-kube-api-access-b4k8v\") pod \"metallb-operator-controller-manager-868865b9b5-bxgn8\" (UID: \"0ff86a67-1bc4-4f45-82ae-cd10727037d6\") " pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.472722 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0ff86a67-1bc4-4f45-82ae-cd10727037d6-webhook-cert\") pod \"metallb-operator-controller-manager-868865b9b5-bxgn8\" (UID: \"0ff86a67-1bc4-4f45-82ae-cd10727037d6\") " pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.472768 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0ff86a67-1bc4-4f45-82ae-cd10727037d6-apiservice-cert\") pod \"metallb-operator-controller-manager-868865b9b5-bxgn8\" (UID: \"0ff86a67-1bc4-4f45-82ae-cd10727037d6\") " pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.574088 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4k8v\" (UniqueName: \"kubernetes.io/projected/0ff86a67-1bc4-4f45-82ae-cd10727037d6-kube-api-access-b4k8v\") pod \"metallb-operator-controller-manager-868865b9b5-bxgn8\" (UID: \"0ff86a67-1bc4-4f45-82ae-cd10727037d6\") " pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.574554 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0ff86a67-1bc4-4f45-82ae-cd10727037d6-webhook-cert\") pod \"metallb-operator-controller-manager-868865b9b5-bxgn8\" (UID: \"0ff86a67-1bc4-4f45-82ae-cd10727037d6\") " pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.575673 4938 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0ff86a67-1bc4-4f45-82ae-cd10727037d6-apiservice-cert\") pod \"metallb-operator-controller-manager-868865b9b5-bxgn8\" (UID: \"0ff86a67-1bc4-4f45-82ae-cd10727037d6\") " pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.580960 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0ff86a67-1bc4-4f45-82ae-cd10727037d6-apiservice-cert\") pod \"metallb-operator-controller-manager-868865b9b5-bxgn8\" (UID: \"0ff86a67-1bc4-4f45-82ae-cd10727037d6\") " pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.585518 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0ff86a67-1bc4-4f45-82ae-cd10727037d6-webhook-cert\") pod \"metallb-operator-controller-manager-868865b9b5-bxgn8\" (UID: \"0ff86a67-1bc4-4f45-82ae-cd10727037d6\") " pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.602136 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4k8v\" (UniqueName: \"kubernetes.io/projected/0ff86a67-1bc4-4f45-82ae-cd10727037d6-kube-api-access-b4k8v\") pod \"metallb-operator-controller-manager-868865b9b5-bxgn8\" (UID: \"0ff86a67-1bc4-4f45-82ae-cd10727037d6\") " pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.609885 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4"] Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.610808 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.615290 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.615355 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-h8g9k" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.615595 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.623553 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4"] Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.676678 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2490230a-e04c-4569-8870-174b949c7ce6-webhook-cert\") pod \"metallb-operator-webhook-server-66d55db99c-wjdl4\" (UID: \"2490230a-e04c-4569-8870-174b949c7ce6\") " pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.676725 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2490230a-e04c-4569-8870-174b949c7ce6-apiservice-cert\") pod \"metallb-operator-webhook-server-66d55db99c-wjdl4\" (UID: \"2490230a-e04c-4569-8870-174b949c7ce6\") " pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.676802 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfcq9\" (UniqueName: \"kubernetes.io/projected/2490230a-e04c-4569-8870-174b949c7ce6-kube-api-access-rfcq9\") pod \"metallb-operator-webhook-server-66d55db99c-wjdl4\" (UID: \"2490230a-e04c-4569-8870-174b949c7ce6\") " pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.685544 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.777547 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfcq9\" (UniqueName: \"kubernetes.io/projected/2490230a-e04c-4569-8870-174b949c7ce6-kube-api-access-rfcq9\") pod \"metallb-operator-webhook-server-66d55db99c-wjdl4\" (UID: \"2490230a-e04c-4569-8870-174b949c7ce6\") " pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.777623 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2490230a-e04c-4569-8870-174b949c7ce6-webhook-cert\") pod \"metallb-operator-webhook-server-66d55db99c-wjdl4\" (UID: \"2490230a-e04c-4569-8870-174b949c7ce6\") " pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.777647 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2490230a-e04c-4569-8870-174b949c7ce6-apiservice-cert\") pod \"metallb-operator-webhook-server-66d55db99c-wjdl4\" (UID: \"2490230a-e04c-4569-8870-174b949c7ce6\") " pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.781454 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2490230a-e04c-4569-8870-174b949c7ce6-webhook-cert\") pod \"metallb-operator-webhook-server-66d55db99c-wjdl4\" (UID: \"2490230a-e04c-4569-8870-174b949c7ce6\") " pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.802736 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2490230a-e04c-4569-8870-174b949c7ce6-apiservice-cert\") pod \"metallb-operator-webhook-server-66d55db99c-wjdl4\" (UID: \"2490230a-e04c-4569-8870-174b949c7ce6\") " pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.832831 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfcq9\" (UniqueName: \"kubernetes.io/projected/2490230a-e04c-4569-8870-174b949c7ce6-kube-api-access-rfcq9\") pod \"metallb-operator-webhook-server-66d55db99c-wjdl4\" (UID: \"2490230a-e04c-4569-8870-174b949c7ce6\") " pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:18 crc kubenswrapper[4938]: I1122 10:52:18.948492 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:19 crc kubenswrapper[4938]: I1122 10:52:19.251043 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8"] Nov 22 10:52:19 crc kubenswrapper[4938]: I1122 10:52:19.366771 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4"] Nov 22 10:52:19 crc kubenswrapper[4938]: W1122 10:52:19.370802 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2490230a_e04c_4569_8870_174b949c7ce6.slice/crio-05d509274374cb21e61f1900ba780903d076f92afa9743f5aa409be285c12c74 WatchSource:0}: Error finding container 05d509274374cb21e61f1900ba780903d076f92afa9743f5aa409be285c12c74: Status 404 returned error can't find the container with id 05d509274374cb21e61f1900ba780903d076f92afa9743f5aa409be285c12c74 Nov 22 10:52:19 crc kubenswrapper[4938]: I1122 10:52:19.510073 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" event={"ID":"2490230a-e04c-4569-8870-174b949c7ce6","Type":"ContainerStarted","Data":"05d509274374cb21e61f1900ba780903d076f92afa9743f5aa409be285c12c74"} Nov 22 10:52:19 crc kubenswrapper[4938]: I1122 10:52:19.511197 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" event={"ID":"0ff86a67-1bc4-4f45-82ae-cd10727037d6","Type":"ContainerStarted","Data":"91d1a9845e4fc0af60a6eb1e68c311b1c2672e2740d4b9237df7d9d1d621070e"} Nov 22 10:52:22 crc kubenswrapper[4938]: I1122 10:52:22.531200 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" event={"ID":"0ff86a67-1bc4-4f45-82ae-cd10727037d6","Type":"ContainerStarted","Data":"2b8da29374e9dab9425b12ee630a1c1ef50c0a7c5bac860431c8a990112dcb3c"} Nov 22 10:52:22 crc kubenswrapper[4938]: I1122 10:52:22.531854 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:22 crc kubenswrapper[4938]: I1122 10:52:22.553253 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" podStartSLOduration=1.709405312 podStartE2EDuration="4.553232379s" podCreationTimestamp="2025-11-22 10:52:18 +0000 UTC" firstStartedPulling="2025-11-22 10:52:19.258537044 +0000 UTC m=+871.726374443" lastFinishedPulling="2025-11-22 10:52:22.102364111 +0000 UTC m=+874.570201510" observedRunningTime="2025-11-22 10:52:22.546827409 +0000 UTC m=+875.014664808" watchObservedRunningTime="2025-11-22 10:52:22.553232379 +0000 UTC m=+875.021069778" Nov 22 10:52:24 crc kubenswrapper[4938]: I1122 10:52:24.543338 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" event={"ID":"2490230a-e04c-4569-8870-174b949c7ce6","Type":"ContainerStarted","Data":"9b457127b1c84c693d20a79cb9c26afd6881a56459c2203c13081fe48b9466f2"} Nov 22 10:52:24 crc kubenswrapper[4938]: I1122 10:52:24.543676 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:24 crc kubenswrapper[4938]: I1122 10:52:24.567804 4938 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" podStartSLOduration=2.370205206 podStartE2EDuration="6.567779847s" podCreationTimestamp="2025-11-22 10:52:18 +0000 UTC" firstStartedPulling="2025-11-22 10:52:19.374234416 +0000 UTC m=+871.842071815" lastFinishedPulling="2025-11-22 10:52:23.571809057 +0000 UTC m=+876.039646456" observedRunningTime="2025-11-22 10:52:24.562296539 +0000 UTC m=+877.030133938" watchObservedRunningTime="2025-11-22 10:52:24.567779847 +0000 UTC m=+877.035617246" Nov 22 10:52:38 crc kubenswrapper[4938]: I1122 10:52:38.955689 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-66d55db99c-wjdl4" Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.215472 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ngbcc"] Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.217178 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.229452 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ngbcc"] Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.241109 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-catalog-content\") pod \"redhat-marketplace-ngbcc\" (UID: \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\") " pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.241203 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9dkd\" (UniqueName: \"kubernetes.io/projected/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-kube-api-access-p9dkd\") pod \"redhat-marketplace-ngbcc\" (UID: \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\") " pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.241259 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-utilities\") pod \"redhat-marketplace-ngbcc\" (UID: \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\") " pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.342039 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-catalog-content\") pod \"redhat-marketplace-ngbcc\" (UID: \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\") " pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.342231 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9dkd\" (UniqueName: \"kubernetes.io/projected/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-kube-api-access-p9dkd\") pod \"redhat-marketplace-ngbcc\" (UID: \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\") " pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.342272 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-utilities\") pod \"redhat-marketplace-ngbcc\" (UID: \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\") " pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.342482 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-catalog-content\") pod \"redhat-marketplace-ngbcc\" (UID: \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\") " pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.342615 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-utilities\") pod \"redhat-marketplace-ngbcc\" (UID: \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\") " pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.363593 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9dkd\" (UniqueName: \"kubernetes.io/projected/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-kube-api-access-p9dkd\") pod \"redhat-marketplace-ngbcc\" (UID: \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\") " pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.547434 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:41 crc kubenswrapper[4938]: I1122 10:52:41.981474 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ngbcc"] Nov 22 10:52:42 crc kubenswrapper[4938]: I1122 10:52:42.636900 4938 generic.go:334] "Generic (PLEG): container finished" podID="dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" containerID="2c1867cdc318f97e407c816e7b7a5079819cf1e5a279950c6c55423038bb9944" exitCode=0 Nov 22 10:52:42 crc kubenswrapper[4938]: I1122 10:52:42.637014 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngbcc" event={"ID":"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d","Type":"ContainerDied","Data":"2c1867cdc318f97e407c816e7b7a5079819cf1e5a279950c6c55423038bb9944"} Nov 22 10:52:42 crc kubenswrapper[4938]: I1122 10:52:42.637191 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngbcc" event={"ID":"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d","Type":"ContainerStarted","Data":"d5476a27d8ad27c46d6ad68d84d303c449727725016b0bfd44002086cc53018c"} Nov 22 10:52:43 crc kubenswrapper[4938]: I1122 10:52:43.645295 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngbcc" event={"ID":"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d","Type":"ContainerStarted","Data":"59193b7454fca4a83757e7718eafb67a7c7f49fdd3d5fdf7764aa02df4d68d72"} Nov 22 10:52:44 crc kubenswrapper[4938]: I1122 10:52:44.655023 4938 generic.go:334] "Generic (PLEG): container finished" podID="dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" containerID="59193b7454fca4a83757e7718eafb67a7c7f49fdd3d5fdf7764aa02df4d68d72" exitCode=0 Nov 22 10:52:44 crc kubenswrapper[4938]: I1122 10:52:44.655074 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngbcc" event={"ID":"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d","Type":"ContainerDied","Data":"59193b7454fca4a83757e7718eafb67a7c7f49fdd3d5fdf7764aa02df4d68d72"} Nov 22 10:52:45 crc 
kubenswrapper[4938]: I1122 10:52:45.661981 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngbcc" event={"ID":"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d","Type":"ContainerStarted","Data":"a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13"} Nov 22 10:52:45 crc kubenswrapper[4938]: I1122 10:52:45.690034 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ngbcc" podStartSLOduration=2.27894618 podStartE2EDuration="4.690015564s" podCreationTimestamp="2025-11-22 10:52:41 +0000 UTC" firstStartedPulling="2025-11-22 10:52:42.638708853 +0000 UTC m=+895.106546252" lastFinishedPulling="2025-11-22 10:52:45.049778237 +0000 UTC m=+897.517615636" observedRunningTime="2025-11-22 10:52:45.684198108 +0000 UTC m=+898.152035547" watchObservedRunningTime="2025-11-22 10:52:45.690015564 +0000 UTC m=+898.157852963" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.404226 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-25n5q"] Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.417356 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.430168 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-25n5q"] Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.547945 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.548671 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.569567 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6nlx\" (UniqueName: \"kubernetes.io/projected/f4499684-f08b-43b2-ba11-dae0e8c40c54-kube-api-access-x6nlx\") pod \"community-operators-25n5q\" (UID: \"f4499684-f08b-43b2-ba11-dae0e8c40c54\") " pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.569839 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4499684-f08b-43b2-ba11-dae0e8c40c54-utilities\") pod \"community-operators-25n5q\" (UID: \"f4499684-f08b-43b2-ba11-dae0e8c40c54\") " pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.569925 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4499684-f08b-43b2-ba11-dae0e8c40c54-catalog-content\") pod \"community-operators-25n5q\" (UID: \"f4499684-f08b-43b2-ba11-dae0e8c40c54\") " pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.591002 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.670363 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4499684-f08b-43b2-ba11-dae0e8c40c54-utilities\") pod 
\"community-operators-25n5q\" (UID: \"f4499684-f08b-43b2-ba11-dae0e8c40c54\") " pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.670406 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4499684-f08b-43b2-ba11-dae0e8c40c54-catalog-content\") pod \"community-operators-25n5q\" (UID: \"f4499684-f08b-43b2-ba11-dae0e8c40c54\") " pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.670459 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6nlx\" (UniqueName: \"kubernetes.io/projected/f4499684-f08b-43b2-ba11-dae0e8c40c54-kube-api-access-x6nlx\") pod \"community-operators-25n5q\" (UID: \"f4499684-f08b-43b2-ba11-dae0e8c40c54\") " pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.670808 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4499684-f08b-43b2-ba11-dae0e8c40c54-utilities\") pod \"community-operators-25n5q\" (UID: \"f4499684-f08b-43b2-ba11-dae0e8c40c54\") " pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.670864 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4499684-f08b-43b2-ba11-dae0e8c40c54-catalog-content\") pod \"community-operators-25n5q\" (UID: \"f4499684-f08b-43b2-ba11-dae0e8c40c54\") " pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.688251 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6nlx\" (UniqueName: \"kubernetes.io/projected/f4499684-f08b-43b2-ba11-dae0e8c40c54-kube-api-access-x6nlx\") pod \"community-operators-25n5q\" (UID: \"f4499684-f08b-43b2-ba11-dae0e8c40c54\") " pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.734315 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:51 crc kubenswrapper[4938]: I1122 10:52:51.741800 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:52:52 crc kubenswrapper[4938]: I1122 10:52:52.183994 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-25n5q"] Nov 22 10:52:52 crc kubenswrapper[4938]: W1122 10:52:52.190150 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4499684_f08b_43b2_ba11_dae0e8c40c54.slice/crio-dad755402b595672d733dd4b953d09362b41a8e01fcd57bd695c8827e5c28fae WatchSource:0}: Error finding container dad755402b595672d733dd4b953d09362b41a8e01fcd57bd695c8827e5c28fae: Status 404 returned error can't find the container with id dad755402b595672d733dd4b953d09362b41a8e01fcd57bd695c8827e5c28fae Nov 22 10:52:52 crc kubenswrapper[4938]: I1122 10:52:52.700783 4938 generic.go:334] "Generic (PLEG): container finished" podID="f4499684-f08b-43b2-ba11-dae0e8c40c54" containerID="a08dc9cb168319dadaa88b73084d7b0cbd0ea08ed4268a03d5ca2340dcecb1f4" exitCode=0 Nov 22 10:52:52 crc kubenswrapper[4938]: I1122 10:52:52.700870 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-25n5q" event={"ID":"f4499684-f08b-43b2-ba11-dae0e8c40c54","Type":"ContainerDied","Data":"a08dc9cb168319dadaa88b73084d7b0cbd0ea08ed4268a03d5ca2340dcecb1f4"} Nov 22 10:52:52 crc kubenswrapper[4938]: I1122 10:52:52.701235 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-25n5q" event={"ID":"f4499684-f08b-43b2-ba11-dae0e8c40c54","Type":"ContainerStarted","Data":"dad755402b595672d733dd4b953d09362b41a8e01fcd57bd695c8827e5c28fae"} Nov 22 10:52:53 crc kubenswrapper[4938]: I1122 10:52:53.709061 4938 generic.go:334] "Generic (PLEG): container finished" podID="f4499684-f08b-43b2-ba11-dae0e8c40c54" containerID="7e664f43d87b83736bbd05d39bcc0e4d8d2712c85172e3227c181dc49e6a35aa" exitCode=0 Nov 22 10:52:53 crc kubenswrapper[4938]: I1122 10:52:53.709140 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-25n5q" event={"ID":"f4499684-f08b-43b2-ba11-dae0e8c40c54","Type":"ContainerDied","Data":"7e664f43d87b83736bbd05d39bcc0e4d8d2712c85172e3227c181dc49e6a35aa"} Nov 22 10:52:54 crc kubenswrapper[4938]: I1122 10:52:54.716972 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-25n5q" event={"ID":"f4499684-f08b-43b2-ba11-dae0e8c40c54","Type":"ContainerStarted","Data":"61b754da827585578451aae03ea98b04a746af2da44e923c4e5a0440d27f97a9"} Nov 22 10:52:54 crc kubenswrapper[4938]: I1122 10:52:54.731751 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-25n5q" podStartSLOduration=2.336034293 podStartE2EDuration="3.731731093s" podCreationTimestamp="2025-11-22 10:52:51 +0000 UTC" firstStartedPulling="2025-11-22 10:52:52.702440574 +0000 UTC m=+905.170277973" lastFinishedPulling="2025-11-22 10:52:54.098137374 +0000 UTC m=+906.565974773" observedRunningTime="2025-11-22 10:52:54.730103682 +0000 UTC m=+907.197941081" watchObservedRunningTime="2025-11-22 10:52:54.731731093 +0000 UTC m=+907.199568512" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.201587 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ngbcc"] Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.201805 4938 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/redhat-marketplace-ngbcc" podUID="dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" containerName="registry-server" containerID="cri-o://a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13" gracePeriod=2 Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.550538 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.720868 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9dkd\" (UniqueName: \"kubernetes.io/projected/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-kube-api-access-p9dkd\") pod \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\" (UID: \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\") " Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.721239 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-utilities\") pod \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\" (UID: \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\") " Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.721283 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-catalog-content\") pod \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\" (UID: \"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d\") " Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.722404 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-utilities" (OuterVolumeSpecName: "utilities") pod "dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" (UID: "dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.726638 4938 generic.go:334] "Generic (PLEG): container finished" podID="dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" containerID="a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13" exitCode=0 Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.726697 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ngbcc" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.726705 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngbcc" event={"ID":"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d","Type":"ContainerDied","Data":"a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13"} Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.726745 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ngbcc" event={"ID":"dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d","Type":"ContainerDied","Data":"d5476a27d8ad27c46d6ad68d84d303c449727725016b0bfd44002086cc53018c"} Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.726766 4938 scope.go:117] "RemoveContainer" containerID="a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.728377 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-kube-api-access-p9dkd" (OuterVolumeSpecName: "kube-api-access-p9dkd") pod "dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" (UID: "dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d"). InnerVolumeSpecName "kube-api-access-p9dkd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.739250 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" (UID: "dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.764423 4938 scope.go:117] "RemoveContainer" containerID="59193b7454fca4a83757e7718eafb67a7c7f49fdd3d5fdf7764aa02df4d68d72" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.778345 4938 scope.go:117] "RemoveContainer" containerID="2c1867cdc318f97e407c816e7b7a5079819cf1e5a279950c6c55423038bb9944" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.803253 4938 scope.go:117] "RemoveContainer" containerID="a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13" Nov 22 10:52:55 crc kubenswrapper[4938]: E1122 10:52:55.803893 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13\": container with ID starting with a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13 not found: ID does not exist" containerID="a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.803955 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13"} err="failed to get container status \"a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13\": rpc error: code = NotFound desc = could not find container \"a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13\": container with ID starting with a7daf502eb24595b27283a95f8775e9aec06e82f170b6f793428fa59913dbe13 not found: ID does not exist" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.803982 4938 scope.go:117] "RemoveContainer" 
containerID="59193b7454fca4a83757e7718eafb67a7c7f49fdd3d5fdf7764aa02df4d68d72" Nov 22 10:52:55 crc kubenswrapper[4938]: E1122 10:52:55.804396 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59193b7454fca4a83757e7718eafb67a7c7f49fdd3d5fdf7764aa02df4d68d72\": container with ID starting with 59193b7454fca4a83757e7718eafb67a7c7f49fdd3d5fdf7764aa02df4d68d72 not found: ID does not exist" containerID="59193b7454fca4a83757e7718eafb67a7c7f49fdd3d5fdf7764aa02df4d68d72" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.804446 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59193b7454fca4a83757e7718eafb67a7c7f49fdd3d5fdf7764aa02df4d68d72"} err="failed to get container status \"59193b7454fca4a83757e7718eafb67a7c7f49fdd3d5fdf7764aa02df4d68d72\": rpc error: code = NotFound desc = could not find container \"59193b7454fca4a83757e7718eafb67a7c7f49fdd3d5fdf7764aa02df4d68d72\": container with ID starting with 59193b7454fca4a83757e7718eafb67a7c7f49fdd3d5fdf7764aa02df4d68d72 not found: ID does not exist" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.804499 4938 scope.go:117] "RemoveContainer" containerID="2c1867cdc318f97e407c816e7b7a5079819cf1e5a279950c6c55423038bb9944" Nov 22 10:52:55 crc kubenswrapper[4938]: E1122 10:52:55.804778 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c1867cdc318f97e407c816e7b7a5079819cf1e5a279950c6c55423038bb9944\": container with ID starting with 2c1867cdc318f97e407c816e7b7a5079819cf1e5a279950c6c55423038bb9944 not found: ID does not exist" containerID="2c1867cdc318f97e407c816e7b7a5079819cf1e5a279950c6c55423038bb9944" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.804807 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c1867cdc318f97e407c816e7b7a5079819cf1e5a279950c6c55423038bb9944"} err="failed to get container status \"2c1867cdc318f97e407c816e7b7a5079819cf1e5a279950c6c55423038bb9944\": rpc error: code = NotFound desc = could not find container \"2c1867cdc318f97e407c816e7b7a5079819cf1e5a279950c6c55423038bb9944\": container with ID starting with 2c1867cdc318f97e407c816e7b7a5079819cf1e5a279950c6c55423038bb9944 not found: ID does not exist" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.822977 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9dkd\" (UniqueName: \"kubernetes.io/projected/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-kube-api-access-p9dkd\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.823010 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:55 crc kubenswrapper[4938]: I1122 10:52:55.823032 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:56 crc kubenswrapper[4938]: I1122 10:52:56.051937 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ngbcc"] Nov 22 10:52:56 crc kubenswrapper[4938]: I1122 10:52:56.056452 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ngbcc"] Nov 22 10:52:56 crc 
kubenswrapper[4938]: I1122 10:52:56.455978 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" path="/var/lib/kubelet/pods/dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d/volumes" Nov 22 10:52:58 crc kubenswrapper[4938]: I1122 10:52:58.688660 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-868865b9b5-bxgn8" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.368966 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-9gsl5"] Nov 22 10:52:59 crc kubenswrapper[4938]: E1122 10:52:59.369327 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" containerName="registry-server" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.369348 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" containerName="registry-server" Nov 22 10:52:59 crc kubenswrapper[4938]: E1122 10:52:59.369366 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" containerName="extract-utilities" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.369373 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" containerName="extract-utilities" Nov 22 10:52:59 crc kubenswrapper[4938]: E1122 10:52:59.369385 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" containerName="extract-content" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.369393 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" containerName="extract-content" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.369573 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcf873f7-3d59-4cb2-a1f0-3929bde1ec1d" containerName="registry-server" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.372406 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc"] Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.372595 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.373544 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.376338 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.376628 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-q7gbx" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.387605 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.389493 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.395070 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc"] Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.455300 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-5l59v"] Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.456536 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-5l59v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.458853 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.459010 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-hz5k4" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.459835 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.461100 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.471247 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-b667v"] Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.472401 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.473812 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.492942 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-b667v"] Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.565263 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9c76a23c-e78d-422c-90aa-7cb20ab288c6-metrics-certs\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.565317 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9c76a23c-e78d-422c-90aa-7cb20ab288c6-frr-conf\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.565622 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pghd8\" (UniqueName: \"kubernetes.io/projected/9c76a23c-e78d-422c-90aa-7cb20ab288c6-kube-api-access-pghd8\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.566082 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9c76a23c-e78d-422c-90aa-7cb20ab288c6-frr-sockets\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.566142 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8fde61c9-db97-436d-8ee1-852084695193-metrics-certs\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.566262 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9c76a23c-e78d-422c-90aa-7cb20ab288c6-metrics\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.566327 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8fde61c9-db97-436d-8ee1-852084695193-memberlist\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.566372 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8fde61c9-db97-436d-8ee1-852084695193-metallb-excludel2\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.566425 4938 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9c76a23c-e78d-422c-90aa-7cb20ab288c6-frr-startup\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.566556 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkh8s\" (UniqueName: \"kubernetes.io/projected/8fde61c9-db97-436d-8ee1-852084695193-kube-api-access-lkh8s\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.566606 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9c76a23c-e78d-422c-90aa-7cb20ab288c6-reloader\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.566642 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xmrz\" (UniqueName: \"kubernetes.io/projected/01cf09eb-20ee-4493-9b69-49beca431020-kube-api-access-9xmrz\") pod \"frr-k8s-webhook-server-6998585d5-gvwwc\" (UID: \"01cf09eb-20ee-4493-9b69-49beca431020\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.566679 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/01cf09eb-20ee-4493-9b69-49beca431020-cert\") pod \"frr-k8s-webhook-server-6998585d5-gvwwc\" (UID: \"01cf09eb-20ee-4493-9b69-49beca431020\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.667498 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9c76a23c-e78d-422c-90aa-7cb20ab288c6-metrics\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.667545 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8fde61c9-db97-436d-8ee1-852084695193-memberlist\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.667570 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8fde61c9-db97-436d-8ee1-852084695193-metallb-excludel2\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.667588 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9c76a23c-e78d-422c-90aa-7cb20ab288c6-frr-startup\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.667607 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/a4e581ed-7db0-4270-9353-ab48412b2994-cert\") pod \"controller-6c7b4b5f48-b667v\" (UID: \"a4e581ed-7db0-4270-9353-ab48412b2994\") " pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.667627 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a4e581ed-7db0-4270-9353-ab48412b2994-metrics-certs\") pod \"controller-6c7b4b5f48-b667v\" (UID: \"a4e581ed-7db0-4270-9353-ab48412b2994\") " pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.667646 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkh8s\" (UniqueName: \"kubernetes.io/projected/8fde61c9-db97-436d-8ee1-852084695193-kube-api-access-lkh8s\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.667662 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9c76a23c-e78d-422c-90aa-7cb20ab288c6-reloader\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.667683 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xmrz\" (UniqueName: \"kubernetes.io/projected/01cf09eb-20ee-4493-9b69-49beca431020-kube-api-access-9xmrz\") pod \"frr-k8s-webhook-server-6998585d5-gvwwc\" (UID: \"01cf09eb-20ee-4493-9b69-49beca431020\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" Nov 22 10:52:59 crc kubenswrapper[4938]: E1122 10:52:59.667680 4938 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.667704 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/01cf09eb-20ee-4493-9b69-49beca431020-cert\") pod \"frr-k8s-webhook-server-6998585d5-gvwwc\" (UID: \"01cf09eb-20ee-4493-9b69-49beca431020\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.667732 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9c76a23c-e78d-422c-90aa-7cb20ab288c6-metrics-certs\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: E1122 10:52:59.668095 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8fde61c9-db97-436d-8ee1-852084695193-memberlist podName:8fde61c9-db97-436d-8ee1-852084695193 nodeName:}" failed. No retries permitted until 2025-11-22 10:53:00.167737564 +0000 UTC m=+912.635574963 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8fde61c9-db97-436d-8ee1-852084695193-memberlist") pod "speaker-5l59v" (UID: "8fde61c9-db97-436d-8ee1-852084695193") : secret "metallb-memberlist" not found Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.668133 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9c76a23c-e78d-422c-90aa-7cb20ab288c6-frr-conf\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.668317 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9c76a23c-e78d-422c-90aa-7cb20ab288c6-reloader\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.668438 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9c76a23c-e78d-422c-90aa-7cb20ab288c6-metrics\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.668558 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9c76a23c-e78d-422c-90aa-7cb20ab288c6-frr-conf\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.668687 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pghd8\" (UniqueName: \"kubernetes.io/projected/9c76a23c-e78d-422c-90aa-7cb20ab288c6-kube-api-access-pghd8\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.668886 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8fde61c9-db97-436d-8ee1-852084695193-metallb-excludel2\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.669087 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9c76a23c-e78d-422c-90aa-7cb20ab288c6-frr-sockets\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.669118 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9c76a23c-e78d-422c-90aa-7cb20ab288c6-frr-sockets\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.669144 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8fde61c9-db97-436d-8ee1-852084695193-metrics-certs\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.669174 4938 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbhp4\" (UniqueName: \"kubernetes.io/projected/a4e581ed-7db0-4270-9353-ab48412b2994-kube-api-access-fbhp4\") pod \"controller-6c7b4b5f48-b667v\" (UID: \"a4e581ed-7db0-4270-9353-ab48412b2994\") " pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.669125 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9c76a23c-e78d-422c-90aa-7cb20ab288c6-frr-startup\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.682558 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8fde61c9-db97-436d-8ee1-852084695193-metrics-certs\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.682585 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9c76a23c-e78d-422c-90aa-7cb20ab288c6-metrics-certs\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.683608 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/01cf09eb-20ee-4493-9b69-49beca431020-cert\") pod \"frr-k8s-webhook-server-6998585d5-gvwwc\" (UID: \"01cf09eb-20ee-4493-9b69-49beca431020\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.688431 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkh8s\" (UniqueName: \"kubernetes.io/projected/8fde61c9-db97-436d-8ee1-852084695193-kube-api-access-lkh8s\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.688878 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xmrz\" (UniqueName: \"kubernetes.io/projected/01cf09eb-20ee-4493-9b69-49beca431020-kube-api-access-9xmrz\") pod \"frr-k8s-webhook-server-6998585d5-gvwwc\" (UID: \"01cf09eb-20ee-4493-9b69-49beca431020\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.695367 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pghd8\" (UniqueName: \"kubernetes.io/projected/9c76a23c-e78d-422c-90aa-7cb20ab288c6-kube-api-access-pghd8\") pod \"frr-k8s-9gsl5\" (UID: \"9c76a23c-e78d-422c-90aa-7cb20ab288c6\") " pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.706441 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.770654 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbhp4\" (UniqueName: \"kubernetes.io/projected/a4e581ed-7db0-4270-9353-ab48412b2994-kube-api-access-fbhp4\") pod \"controller-6c7b4b5f48-b667v\" (UID: \"a4e581ed-7db0-4270-9353-ab48412b2994\") " pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.771012 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a4e581ed-7db0-4270-9353-ab48412b2994-cert\") pod \"controller-6c7b4b5f48-b667v\" (UID: \"a4e581ed-7db0-4270-9353-ab48412b2994\") " pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.771149 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a4e581ed-7db0-4270-9353-ab48412b2994-metrics-certs\") pod \"controller-6c7b4b5f48-b667v\" (UID: \"a4e581ed-7db0-4270-9353-ab48412b2994\") " pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.772684 4938 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.774578 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a4e581ed-7db0-4270-9353-ab48412b2994-metrics-certs\") pod \"controller-6c7b4b5f48-b667v\" (UID: \"a4e581ed-7db0-4270-9353-ab48412b2994\") " pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.785045 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbhp4\" (UniqueName: \"kubernetes.io/projected/a4e581ed-7db0-4270-9353-ab48412b2994-kube-api-access-fbhp4\") pod \"controller-6c7b4b5f48-b667v\" (UID: \"a4e581ed-7db0-4270-9353-ab48412b2994\") " pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.785273 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a4e581ed-7db0-4270-9353-ab48412b2994-cert\") pod \"controller-6c7b4b5f48-b667v\" (UID: \"a4e581ed-7db0-4270-9353-ab48412b2994\") " pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.787735 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:52:59 crc kubenswrapper[4938]: I1122 10:52:59.993076 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:53:00 crc kubenswrapper[4938]: I1122 10:53:00.027985 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-b667v"] Nov 22 10:53:00 crc kubenswrapper[4938]: I1122 10:53:00.124623 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc"] Nov 22 10:53:00 crc kubenswrapper[4938]: W1122 10:53:00.135550 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod01cf09eb_20ee_4493_9b69_49beca431020.slice/crio-af5a7ec13b604ca60bef3a1de0697a2b3f34a1db798a09aba31f0ca07826922e WatchSource:0}: Error finding container af5a7ec13b604ca60bef3a1de0697a2b3f34a1db798a09aba31f0ca07826922e: Status 404 returned error can't find the container with id af5a7ec13b604ca60bef3a1de0697a2b3f34a1db798a09aba31f0ca07826922e Nov 22 10:53:00 crc kubenswrapper[4938]: I1122 10:53:00.175237 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8fde61c9-db97-436d-8ee1-852084695193-memberlist\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:53:00 crc kubenswrapper[4938]: E1122 10:53:00.175392 4938 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 22 10:53:00 crc kubenswrapper[4938]: E1122 10:53:00.175440 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8fde61c9-db97-436d-8ee1-852084695193-memberlist podName:8fde61c9-db97-436d-8ee1-852084695193 nodeName:}" failed. No retries permitted until 2025-11-22 10:53:01.175425535 +0000 UTC m=+913.643262934 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8fde61c9-db97-436d-8ee1-852084695193-memberlist") pod "speaker-5l59v" (UID: "8fde61c9-db97-436d-8ee1-852084695193") : secret "metallb-memberlist" not found Nov 22 10:53:00 crc kubenswrapper[4938]: I1122 10:53:00.754382 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9gsl5" event={"ID":"9c76a23c-e78d-422c-90aa-7cb20ab288c6","Type":"ContainerStarted","Data":"0ebd2b07e5b51d39ab8d7edc79aa4ac017a3b8b2ea038d6728115e48db3415c2"} Nov 22 10:53:00 crc kubenswrapper[4938]: I1122 10:53:00.756258 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-b667v" event={"ID":"a4e581ed-7db0-4270-9353-ab48412b2994","Type":"ContainerStarted","Data":"582f06c202db3ae46626b20698b477b3c322c46ddef7155ffb3190ee51917ffa"} Nov 22 10:53:00 crc kubenswrapper[4938]: I1122 10:53:00.756287 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-b667v" event={"ID":"a4e581ed-7db0-4270-9353-ab48412b2994","Type":"ContainerStarted","Data":"15be6d0404c7fd034941496e59201039c177f4db468de5e7943903983d258047"} Nov 22 10:53:00 crc kubenswrapper[4938]: I1122 10:53:00.756297 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-b667v" event={"ID":"a4e581ed-7db0-4270-9353-ab48412b2994","Type":"ContainerStarted","Data":"a39f7401425e537196315723ad52ff230a4bd359c359c6ff96a0e447cc0e332a"} Nov 22 10:53:00 crc kubenswrapper[4938]: I1122 10:53:00.756456 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:53:00 crc kubenswrapper[4938]: I1122 10:53:00.757330 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" event={"ID":"01cf09eb-20ee-4493-9b69-49beca431020","Type":"ContainerStarted","Data":"af5a7ec13b604ca60bef3a1de0697a2b3f34a1db798a09aba31f0ca07826922e"} Nov 22 10:53:00 crc kubenswrapper[4938]: I1122 10:53:00.774479 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-b667v" podStartSLOduration=1.774460446 podStartE2EDuration="1.774460446s" podCreationTimestamp="2025-11-22 10:52:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:53:00.771941172 +0000 UTC m=+913.239778581" watchObservedRunningTime="2025-11-22 10:53:00.774460446 +0000 UTC m=+913.242297845" Nov 22 10:53:01 crc kubenswrapper[4938]: I1122 10:53:01.186275 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8fde61c9-db97-436d-8ee1-852084695193-memberlist\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:53:01 crc kubenswrapper[4938]: I1122 10:53:01.212464 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8fde61c9-db97-436d-8ee1-852084695193-memberlist\") pod \"speaker-5l59v\" (UID: \"8fde61c9-db97-436d-8ee1-852084695193\") " pod="metallb-system/speaker-5l59v" Nov 22 10:53:01 crc kubenswrapper[4938]: I1122 10:53:01.271407 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-5l59v" Nov 22 10:53:01 crc kubenswrapper[4938]: W1122 10:53:01.295114 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8fde61c9_db97_436d_8ee1_852084695193.slice/crio-7e6e2f0809a485f806339c9f927b02b8d6ce5d67c29317357699e5472820d165 WatchSource:0}: Error finding container 7e6e2f0809a485f806339c9f927b02b8d6ce5d67c29317357699e5472820d165: Status 404 returned error can't find the container with id 7e6e2f0809a485f806339c9f927b02b8d6ce5d67c29317357699e5472820d165 Nov 22 10:53:01 crc kubenswrapper[4938]: I1122 10:53:01.743156 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:53:01 crc kubenswrapper[4938]: I1122 10:53:01.743259 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:53:01 crc kubenswrapper[4938]: I1122 10:53:01.767617 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-5l59v" event={"ID":"8fde61c9-db97-436d-8ee1-852084695193","Type":"ContainerStarted","Data":"579f36fa7df2cfcc45e45c9420303219aa3ff87f71c485499a119628842cd3c5"} Nov 22 10:53:01 crc kubenswrapper[4938]: I1122 10:53:01.767705 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-5l59v" event={"ID":"8fde61c9-db97-436d-8ee1-852084695193","Type":"ContainerStarted","Data":"203d4b6020ec5eeb9069612500f6a4384d580cd0c6228a7767886dd2fd4459d6"} Nov 22 10:53:01 crc kubenswrapper[4938]: I1122 10:53:01.767730 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-5l59v" event={"ID":"8fde61c9-db97-436d-8ee1-852084695193","Type":"ContainerStarted","Data":"7e6e2f0809a485f806339c9f927b02b8d6ce5d67c29317357699e5472820d165"} Nov 22 10:53:01 crc kubenswrapper[4938]: I1122 10:53:01.767998 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-5l59v" Nov 22 10:53:01 crc kubenswrapper[4938]: I1122 10:53:01.788102 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-5l59v" podStartSLOduration=2.788084736 podStartE2EDuration="2.788084736s" podCreationTimestamp="2025-11-22 10:52:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:53:01.782497434 +0000 UTC m=+914.250334843" watchObservedRunningTime="2025-11-22 10:53:01.788084736 +0000 UTC m=+914.255922135" Nov 22 10:53:01 crc kubenswrapper[4938]: I1122 10:53:01.792348 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:53:01 crc kubenswrapper[4938]: I1122 10:53:01.836066 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.411337 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kjx4c"] Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.412703 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.430166 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2t98\" (UniqueName: \"kubernetes.io/projected/15e98409-e3dd-404f-9686-6d8b74367b2d-kube-api-access-w2t98\") pod \"certified-operators-kjx4c\" (UID: \"15e98409-e3dd-404f-9686-6d8b74367b2d\") " pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.430472 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e98409-e3dd-404f-9686-6d8b74367b2d-utilities\") pod \"certified-operators-kjx4c\" (UID: \"15e98409-e3dd-404f-9686-6d8b74367b2d\") " pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.430503 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e98409-e3dd-404f-9686-6d8b74367b2d-catalog-content\") pod \"certified-operators-kjx4c\" (UID: \"15e98409-e3dd-404f-9686-6d8b74367b2d\") " pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.441249 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kjx4c"] Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.531969 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2t98\" (UniqueName: \"kubernetes.io/projected/15e98409-e3dd-404f-9686-6d8b74367b2d-kube-api-access-w2t98\") pod \"certified-operators-kjx4c\" (UID: \"15e98409-e3dd-404f-9686-6d8b74367b2d\") " pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.532098 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e98409-e3dd-404f-9686-6d8b74367b2d-utilities\") pod \"certified-operators-kjx4c\" (UID: \"15e98409-e3dd-404f-9686-6d8b74367b2d\") " pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.532140 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e98409-e3dd-404f-9686-6d8b74367b2d-catalog-content\") pod \"certified-operators-kjx4c\" (UID: \"15e98409-e3dd-404f-9686-6d8b74367b2d\") " pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.532687 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e98409-e3dd-404f-9686-6d8b74367b2d-utilities\") pod \"certified-operators-kjx4c\" (UID: \"15e98409-e3dd-404f-9686-6d8b74367b2d\") " pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.533132 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e98409-e3dd-404f-9686-6d8b74367b2d-catalog-content\") pod \"certified-operators-kjx4c\" (UID: \"15e98409-e3dd-404f-9686-6d8b74367b2d\") " pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.561749 4938 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-w2t98\" (UniqueName: \"kubernetes.io/projected/15e98409-e3dd-404f-9686-6d8b74367b2d-kube-api-access-w2t98\") pod \"certified-operators-kjx4c\" (UID: \"15e98409-e3dd-404f-9686-6d8b74367b2d\") " pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:03 crc kubenswrapper[4938]: I1122 10:53:03.732192 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:04 crc kubenswrapper[4938]: I1122 10:53:04.091966 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kjx4c"] Nov 22 10:53:04 crc kubenswrapper[4938]: I1122 10:53:04.795790 4938 generic.go:334] "Generic (PLEG): container finished" podID="15e98409-e3dd-404f-9686-6d8b74367b2d" containerID="607c394baaa089d11985091a62583b67e7cb3a0923ba53ff6351ba61bcdf61f7" exitCode=0 Nov 22 10:53:04 crc kubenswrapper[4938]: I1122 10:53:04.795839 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjx4c" event={"ID":"15e98409-e3dd-404f-9686-6d8b74367b2d","Type":"ContainerDied","Data":"607c394baaa089d11985091a62583b67e7cb3a0923ba53ff6351ba61bcdf61f7"} Nov 22 10:53:04 crc kubenswrapper[4938]: I1122 10:53:04.795893 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjx4c" event={"ID":"15e98409-e3dd-404f-9686-6d8b74367b2d","Type":"ContainerStarted","Data":"fbc4458729000e97ba504fee3ba563d35ea7488e7111e92445885f198a778be7"} Nov 22 10:53:05 crc kubenswrapper[4938]: I1122 10:53:05.411039 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-25n5q"] Nov 22 10:53:05 crc kubenswrapper[4938]: I1122 10:53:05.411550 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-25n5q" podUID="f4499684-f08b-43b2-ba11-dae0e8c40c54" containerName="registry-server" containerID="cri-o://61b754da827585578451aae03ea98b04a746af2da44e923c4e5a0440d27f97a9" gracePeriod=2 Nov 22 10:53:05 crc kubenswrapper[4938]: I1122 10:53:05.802523 4938 generic.go:334] "Generic (PLEG): container finished" podID="f4499684-f08b-43b2-ba11-dae0e8c40c54" containerID="61b754da827585578451aae03ea98b04a746af2da44e923c4e5a0440d27f97a9" exitCode=0 Nov 22 10:53:05 crc kubenswrapper[4938]: I1122 10:53:05.802566 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-25n5q" event={"ID":"f4499684-f08b-43b2-ba11-dae0e8c40c54","Type":"ContainerDied","Data":"61b754da827585578451aae03ea98b04a746af2da44e923c4e5a0440d27f97a9"} Nov 22 10:53:08 crc kubenswrapper[4938]: I1122 10:53:08.968960 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.151123 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4499684-f08b-43b2-ba11-dae0e8c40c54-catalog-content\") pod \"f4499684-f08b-43b2-ba11-dae0e8c40c54\" (UID: \"f4499684-f08b-43b2-ba11-dae0e8c40c54\") " Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.151526 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6nlx\" (UniqueName: \"kubernetes.io/projected/f4499684-f08b-43b2-ba11-dae0e8c40c54-kube-api-access-x6nlx\") pod \"f4499684-f08b-43b2-ba11-dae0e8c40c54\" (UID: \"f4499684-f08b-43b2-ba11-dae0e8c40c54\") " Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.152172 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4499684-f08b-43b2-ba11-dae0e8c40c54-utilities\") pod \"f4499684-f08b-43b2-ba11-dae0e8c40c54\" (UID: \"f4499684-f08b-43b2-ba11-dae0e8c40c54\") " Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.152869 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4499684-f08b-43b2-ba11-dae0e8c40c54-utilities" (OuterVolumeSpecName: "utilities") pod "f4499684-f08b-43b2-ba11-dae0e8c40c54" (UID: "f4499684-f08b-43b2-ba11-dae0e8c40c54"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.161027 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4499684-f08b-43b2-ba11-dae0e8c40c54-kube-api-access-x6nlx" (OuterVolumeSpecName: "kube-api-access-x6nlx") pod "f4499684-f08b-43b2-ba11-dae0e8c40c54" (UID: "f4499684-f08b-43b2-ba11-dae0e8c40c54"). InnerVolumeSpecName "kube-api-access-x6nlx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.206004 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4499684-f08b-43b2-ba11-dae0e8c40c54-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f4499684-f08b-43b2-ba11-dae0e8c40c54" (UID: "f4499684-f08b-43b2-ba11-dae0e8c40c54"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.253454 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6nlx\" (UniqueName: \"kubernetes.io/projected/f4499684-f08b-43b2-ba11-dae0e8c40c54-kube-api-access-x6nlx\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.253507 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4499684-f08b-43b2-ba11-dae0e8c40c54-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.253535 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4499684-f08b-43b2-ba11-dae0e8c40c54-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.835503 4938 generic.go:334] "Generic (PLEG): container finished" podID="15e98409-e3dd-404f-9686-6d8b74367b2d" containerID="24ada195a4c81d6a45ee3bc1caf05855590cda9d35c81112f06591abed26c4f7" exitCode=0 Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.835585 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjx4c" event={"ID":"15e98409-e3dd-404f-9686-6d8b74367b2d","Type":"ContainerDied","Data":"24ada195a4c81d6a45ee3bc1caf05855590cda9d35c81112f06591abed26c4f7"} Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.838326 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" event={"ID":"01cf09eb-20ee-4493-9b69-49beca431020","Type":"ContainerStarted","Data":"bf53dc37060875a14c83ee6473bad4b80c47c312ae877d797b64c9e55370c6ba"} Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.838596 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.841702 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-25n5q" event={"ID":"f4499684-f08b-43b2-ba11-dae0e8c40c54","Type":"ContainerDied","Data":"dad755402b595672d733dd4b953d09362b41a8e01fcd57bd695c8827e5c28fae"} Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.841770 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-25n5q" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.841790 4938 scope.go:117] "RemoveContainer" containerID="61b754da827585578451aae03ea98b04a746af2da44e923c4e5a0440d27f97a9" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.848713 4938 generic.go:334] "Generic (PLEG): container finished" podID="9c76a23c-e78d-422c-90aa-7cb20ab288c6" containerID="99fba344f3db290614f754d8e8c42a85ccbc1691f9ade8a402130ed9825c9c61" exitCode=0 Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.848758 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9gsl5" event={"ID":"9c76a23c-e78d-422c-90aa-7cb20ab288c6","Type":"ContainerDied","Data":"99fba344f3db290614f754d8e8c42a85ccbc1691f9ade8a402130ed9825c9c61"} Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.867517 4938 scope.go:117] "RemoveContainer" containerID="7e664f43d87b83736bbd05d39bcc0e4d8d2712c85172e3227c181dc49e6a35aa" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.914150 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" podStartSLOduration=2.250579447 podStartE2EDuration="10.914128301s" podCreationTimestamp="2025-11-22 10:52:59 +0000 UTC" firstStartedPulling="2025-11-22 10:53:00.137505692 +0000 UTC m=+912.605343091" lastFinishedPulling="2025-11-22 10:53:08.801054546 +0000 UTC m=+921.268891945" observedRunningTime="2025-11-22 10:53:09.906937828 +0000 UTC m=+922.374775247" watchObservedRunningTime="2025-11-22 10:53:09.914128301 +0000 UTC m=+922.381965700" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.922232 4938 scope.go:117] "RemoveContainer" containerID="a08dc9cb168319dadaa88b73084d7b0cbd0ea08ed4268a03d5ca2340dcecb1f4" Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.924899 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-25n5q"] Nov 22 10:53:09 crc kubenswrapper[4938]: I1122 10:53:09.929386 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-25n5q"] Nov 22 10:53:10 crc kubenswrapper[4938]: I1122 10:53:10.455490 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4499684-f08b-43b2-ba11-dae0e8c40c54" path="/var/lib/kubelet/pods/f4499684-f08b-43b2-ba11-dae0e8c40c54/volumes" Nov 22 10:53:10 crc kubenswrapper[4938]: I1122 10:53:10.858874 4938 generic.go:334] "Generic (PLEG): container finished" podID="9c76a23c-e78d-422c-90aa-7cb20ab288c6" containerID="93bf75b2c83c6bdc6486c834b39f787de430b45961e598d3097a1283eb3b040e" exitCode=0 Nov 22 10:53:10 crc kubenswrapper[4938]: I1122 10:53:10.859048 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9gsl5" event={"ID":"9c76a23c-e78d-422c-90aa-7cb20ab288c6","Type":"ContainerDied","Data":"93bf75b2c83c6bdc6486c834b39f787de430b45961e598d3097a1283eb3b040e"} Nov 22 10:53:11 crc kubenswrapper[4938]: I1122 10:53:11.287090 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-5l59v" Nov 22 10:53:11 crc kubenswrapper[4938]: I1122 10:53:11.301036 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:53:11 crc kubenswrapper[4938]: I1122 10:53:11.301087 
4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:53:11 crc kubenswrapper[4938]: I1122 10:53:11.865638 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjx4c" event={"ID":"15e98409-e3dd-404f-9686-6d8b74367b2d","Type":"ContainerStarted","Data":"fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027"} Nov 22 10:53:11 crc kubenswrapper[4938]: I1122 10:53:11.868030 4938 generic.go:334] "Generic (PLEG): container finished" podID="9c76a23c-e78d-422c-90aa-7cb20ab288c6" containerID="41a6ebf81a26f9578f02f942b1b2c8dfc209c43007a8a18b3966f027b64a1d60" exitCode=0 Nov 22 10:53:11 crc kubenswrapper[4938]: I1122 10:53:11.868059 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9gsl5" event={"ID":"9c76a23c-e78d-422c-90aa-7cb20ab288c6","Type":"ContainerDied","Data":"41a6ebf81a26f9578f02f942b1b2c8dfc209c43007a8a18b3966f027b64a1d60"} Nov 22 10:53:11 crc kubenswrapper[4938]: I1122 10:53:11.887256 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kjx4c" podStartSLOduration=2.706012904 podStartE2EDuration="8.887231773s" podCreationTimestamp="2025-11-22 10:53:03 +0000 UTC" firstStartedPulling="2025-11-22 10:53:04.797934095 +0000 UTC m=+917.265771494" lastFinishedPulling="2025-11-22 10:53:10.979152944 +0000 UTC m=+923.446990363" observedRunningTime="2025-11-22 10:53:11.882120654 +0000 UTC m=+924.349958063" watchObservedRunningTime="2025-11-22 10:53:11.887231773 +0000 UTC m=+924.355069182" Nov 22 10:53:12 crc kubenswrapper[4938]: I1122 10:53:12.876073 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9gsl5" event={"ID":"9c76a23c-e78d-422c-90aa-7cb20ab288c6","Type":"ContainerStarted","Data":"8b95748bc60478152eebb006d541bfdabdfb79a42fd9b5c9549827adbe5e570e"} Nov 22 10:53:12 crc kubenswrapper[4938]: I1122 10:53:12.876430 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9gsl5" event={"ID":"9c76a23c-e78d-422c-90aa-7cb20ab288c6","Type":"ContainerStarted","Data":"05df8a0d0e31650458845eb35abcf88ba694a13bb52ea02e599833dc06489c02"} Nov 22 10:53:12 crc kubenswrapper[4938]: I1122 10:53:12.876446 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9gsl5" event={"ID":"9c76a23c-e78d-422c-90aa-7cb20ab288c6","Type":"ContainerStarted","Data":"6cc5905e7a10406bb593cfb7ad4c9666ea98ba10791b90d30910bd047993199c"} Nov 22 10:53:12 crc kubenswrapper[4938]: I1122 10:53:12.876458 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9gsl5" event={"ID":"9c76a23c-e78d-422c-90aa-7cb20ab288c6","Type":"ContainerStarted","Data":"23be115b9f164f733ac7d170c203a51012b1ba79a8e1d64486805db858346239"} Nov 22 10:53:12 crc kubenswrapper[4938]: I1122 10:53:12.876468 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9gsl5" event={"ID":"9c76a23c-e78d-422c-90aa-7cb20ab288c6","Type":"ContainerStarted","Data":"fa9ae6ab13093067d0b0254c57ae21127fe9f18ad511757180018f3ea2ce4450"} Nov 22 10:53:13 crc kubenswrapper[4938]: I1122 10:53:13.733012 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kjx4c" 
Nov 22 10:53:13 crc kubenswrapper[4938]: I1122 10:53:13.733274 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:13 crc kubenswrapper[4938]: I1122 10:53:13.770964 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:13 crc kubenswrapper[4938]: I1122 10:53:13.885839 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9gsl5" event={"ID":"9c76a23c-e78d-422c-90aa-7cb20ab288c6","Type":"ContainerStarted","Data":"e6681083221a6667ec569bb8bd603910b87f595f44756e35a24bffaeed6cde58"} Nov 22 10:53:13 crc kubenswrapper[4938]: I1122 10:53:13.907190 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-9gsl5" podStartSLOduration=6.27349169 podStartE2EDuration="14.907171946s" podCreationTimestamp="2025-11-22 10:52:59 +0000 UTC" firstStartedPulling="2025-11-22 10:53:00.12364075 +0000 UTC m=+912.591478149" lastFinishedPulling="2025-11-22 10:53:08.757321006 +0000 UTC m=+921.225158405" observedRunningTime="2025-11-22 10:53:13.907173396 +0000 UTC m=+926.375010795" watchObservedRunningTime="2025-11-22 10:53:13.907171946 +0000 UTC m=+926.375009335" Nov 22 10:53:14 crc kubenswrapper[4938]: I1122 10:53:14.891956 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:53:14 crc kubenswrapper[4938]: I1122 10:53:14.994355 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:53:15 crc kubenswrapper[4938]: I1122 10:53:15.034537 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:53:19 crc kubenswrapper[4938]: I1122 10:53:19.711958 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-gvwwc" Nov 22 10:53:19 crc kubenswrapper[4938]: I1122 10:53:19.790600 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-b667v" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.612667 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-frq48"] Nov 22 10:53:21 crc kubenswrapper[4938]: E1122 10:53:21.613203 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4499684-f08b-43b2-ba11-dae0e8c40c54" containerName="registry-server" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.613215 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4499684-f08b-43b2-ba11-dae0e8c40c54" containerName="registry-server" Nov 22 10:53:21 crc kubenswrapper[4938]: E1122 10:53:21.613229 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4499684-f08b-43b2-ba11-dae0e8c40c54" containerName="extract-utilities" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.613235 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4499684-f08b-43b2-ba11-dae0e8c40c54" containerName="extract-utilities" Nov 22 10:53:21 crc kubenswrapper[4938]: E1122 10:53:21.613255 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4499684-f08b-43b2-ba11-dae0e8c40c54" containerName="extract-content" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.613262 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4499684-f08b-43b2-ba11-dae0e8c40c54" 
containerName="extract-content" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.613361 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4499684-f08b-43b2-ba11-dae0e8c40c54" containerName="registry-server" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.613748 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-frq48" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.615687 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.615923 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-fq42w" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.616191 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.629137 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-frq48"] Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.747167 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxmmm\" (UniqueName: \"kubernetes.io/projected/3e9e8fa5-de72-4a69-82e5-8037808b43c9-kube-api-access-wxmmm\") pod \"openstack-operator-index-frq48\" (UID: \"3e9e8fa5-de72-4a69-82e5-8037808b43c9\") " pod="openstack-operators/openstack-operator-index-frq48" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.848083 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxmmm\" (UniqueName: \"kubernetes.io/projected/3e9e8fa5-de72-4a69-82e5-8037808b43c9-kube-api-access-wxmmm\") pod \"openstack-operator-index-frq48\" (UID: \"3e9e8fa5-de72-4a69-82e5-8037808b43c9\") " pod="openstack-operators/openstack-operator-index-frq48" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.870286 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxmmm\" (UniqueName: \"kubernetes.io/projected/3e9e8fa5-de72-4a69-82e5-8037808b43c9-kube-api-access-wxmmm\") pod \"openstack-operator-index-frq48\" (UID: \"3e9e8fa5-de72-4a69-82e5-8037808b43c9\") " pod="openstack-operators/openstack-operator-index-frq48" Nov 22 10:53:21 crc kubenswrapper[4938]: I1122 10:53:21.929802 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-frq48" Nov 22 10:53:22 crc kubenswrapper[4938]: I1122 10:53:22.117985 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-frq48"] Nov 22 10:53:22 crc kubenswrapper[4938]: I1122 10:53:22.931675 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-frq48" event={"ID":"3e9e8fa5-de72-4a69-82e5-8037808b43c9","Type":"ContainerStarted","Data":"e91223703113b7936c6a477ec0a8e51e6c687e74c9f5dac3f9f4e3c1e803cfcf"} Nov 22 10:53:23 crc kubenswrapper[4938]: I1122 10:53:23.770751 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:24 crc kubenswrapper[4938]: I1122 10:53:24.943471 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-frq48" event={"ID":"3e9e8fa5-de72-4a69-82e5-8037808b43c9","Type":"ContainerStarted","Data":"8bc7f1ec8a242a84fb02321f09085bb3e270f1e664b63a63667261285e4b0515"} Nov 22 10:53:24 crc kubenswrapper[4938]: I1122 10:53:24.955585 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-frq48" podStartSLOduration=2.025007275 podStartE2EDuration="3.955569218s" podCreationTimestamp="2025-11-22 10:53:21 +0000 UTC" firstStartedPulling="2025-11-22 10:53:22.116790753 +0000 UTC m=+934.584628152" lastFinishedPulling="2025-11-22 10:53:24.047352696 +0000 UTC m=+936.515190095" observedRunningTime="2025-11-22 10:53:24.955215129 +0000 UTC m=+937.423052528" watchObservedRunningTime="2025-11-22 10:53:24.955569218 +0000 UTC m=+937.423406617" Nov 22 10:53:27 crc kubenswrapper[4938]: I1122 10:53:27.597540 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-frq48"] Nov 22 10:53:27 crc kubenswrapper[4938]: I1122 10:53:27.597929 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-frq48" podUID="3e9e8fa5-de72-4a69-82e5-8037808b43c9" containerName="registry-server" containerID="cri-o://8bc7f1ec8a242a84fb02321f09085bb3e270f1e664b63a63667261285e4b0515" gracePeriod=2 Nov 22 10:53:27 crc kubenswrapper[4938]: I1122 10:53:27.966088 4938 generic.go:334] "Generic (PLEG): container finished" podID="3e9e8fa5-de72-4a69-82e5-8037808b43c9" containerID="8bc7f1ec8a242a84fb02321f09085bb3e270f1e664b63a63667261285e4b0515" exitCode=0 Nov 22 10:53:27 crc kubenswrapper[4938]: I1122 10:53:27.966143 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-frq48" event={"ID":"3e9e8fa5-de72-4a69-82e5-8037808b43c9","Type":"ContainerDied","Data":"8bc7f1ec8a242a84fb02321f09085bb3e270f1e664b63a63667261285e4b0515"} Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.411210 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-j4l4j"] Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.412649 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-j4l4j" Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.417417 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-j4l4j"] Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.439175 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcbnl\" (UniqueName: \"kubernetes.io/projected/122aa2ac-8dc3-4698-818b-120126fb039b-kube-api-access-lcbnl\") pod \"openstack-operator-index-j4l4j\" (UID: \"122aa2ac-8dc3-4698-818b-120126fb039b\") " pod="openstack-operators/openstack-operator-index-j4l4j" Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.531423 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-frq48" Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.540091 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcbnl\" (UniqueName: \"kubernetes.io/projected/122aa2ac-8dc3-4698-818b-120126fb039b-kube-api-access-lcbnl\") pod \"openstack-operator-index-j4l4j\" (UID: \"122aa2ac-8dc3-4698-818b-120126fb039b\") " pod="openstack-operators/openstack-operator-index-j4l4j" Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.564416 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcbnl\" (UniqueName: \"kubernetes.io/projected/122aa2ac-8dc3-4698-818b-120126fb039b-kube-api-access-lcbnl\") pod \"openstack-operator-index-j4l4j\" (UID: \"122aa2ac-8dc3-4698-818b-120126fb039b\") " pod="openstack-operators/openstack-operator-index-j4l4j" Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.641076 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxmmm\" (UniqueName: \"kubernetes.io/projected/3e9e8fa5-de72-4a69-82e5-8037808b43c9-kube-api-access-wxmmm\") pod \"3e9e8fa5-de72-4a69-82e5-8037808b43c9\" (UID: \"3e9e8fa5-de72-4a69-82e5-8037808b43c9\") " Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.644317 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e9e8fa5-de72-4a69-82e5-8037808b43c9-kube-api-access-wxmmm" (OuterVolumeSpecName: "kube-api-access-wxmmm") pod "3e9e8fa5-de72-4a69-82e5-8037808b43c9" (UID: "3e9e8fa5-de72-4a69-82e5-8037808b43c9"). InnerVolumeSpecName "kube-api-access-wxmmm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.738622 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-j4l4j" Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.742391 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxmmm\" (UniqueName: \"kubernetes.io/projected/3e9e8fa5-de72-4a69-82e5-8037808b43c9-kube-api-access-wxmmm\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.972460 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-frq48" event={"ID":"3e9e8fa5-de72-4a69-82e5-8037808b43c9","Type":"ContainerDied","Data":"e91223703113b7936c6a477ec0a8e51e6c687e74c9f5dac3f9f4e3c1e803cfcf"} Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.972842 4938 scope.go:117] "RemoveContainer" containerID="8bc7f1ec8a242a84fb02321f09085bb3e270f1e664b63a63667261285e4b0515" Nov 22 10:53:28 crc kubenswrapper[4938]: I1122 10:53:28.972482 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-frq48" Nov 22 10:53:29 crc kubenswrapper[4938]: I1122 10:53:29.002123 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-frq48"] Nov 22 10:53:29 crc kubenswrapper[4938]: I1122 10:53:29.005802 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-frq48"] Nov 22 10:53:29 crc kubenswrapper[4938]: I1122 10:53:29.142185 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-j4l4j"] Nov 22 10:53:29 crc kubenswrapper[4938]: I1122 10:53:29.979552 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-j4l4j" event={"ID":"122aa2ac-8dc3-4698-818b-120126fb039b","Type":"ContainerStarted","Data":"0774f797c4b4e1948350fbd737180b1f4379426989cd928d9d16ed15fc47089a"} Nov 22 10:53:29 crc kubenswrapper[4938]: I1122 10:53:29.979602 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-j4l4j" event={"ID":"122aa2ac-8dc3-4698-818b-120126fb039b","Type":"ContainerStarted","Data":"088cba2cd418d19d251a0530dc56087d91fb7fad8c2da178428119a65e0b923b"} Nov 22 10:53:29 crc kubenswrapper[4938]: I1122 10:53:29.993832 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-j4l4j" podStartSLOduration=1.923236773 podStartE2EDuration="1.993812345s" podCreationTimestamp="2025-11-22 10:53:28 +0000 UTC" firstStartedPulling="2025-11-22 10:53:29.151362532 +0000 UTC m=+941.619199931" lastFinishedPulling="2025-11-22 10:53:29.221938084 +0000 UTC m=+941.689775503" observedRunningTime="2025-11-22 10:53:29.99243191 +0000 UTC m=+942.460269299" watchObservedRunningTime="2025-11-22 10:53:29.993812345 +0000 UTC m=+942.461649744" Nov 22 10:53:29 crc kubenswrapper[4938]: I1122 10:53:29.999143 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-9gsl5" Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.461185 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e9e8fa5-de72-4a69-82e5-8037808b43c9" path="/var/lib/kubelet/pods/3e9e8fa5-de72-4a69-82e5-8037808b43c9/volumes" Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.598674 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kjx4c"] Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.598901 4938 
Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.598901 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kjx4c" podUID="15e98409-e3dd-404f-9686-6d8b74367b2d" containerName="registry-server" containerID="cri-o://fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027" gracePeriod=2
Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.959318 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kjx4c"
Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.969104 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2t98\" (UniqueName: \"kubernetes.io/projected/15e98409-e3dd-404f-9686-6d8b74367b2d-kube-api-access-w2t98\") pod \"15e98409-e3dd-404f-9686-6d8b74367b2d\" (UID: \"15e98409-e3dd-404f-9686-6d8b74367b2d\") "
Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.969152 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e98409-e3dd-404f-9686-6d8b74367b2d-catalog-content\") pod \"15e98409-e3dd-404f-9686-6d8b74367b2d\" (UID: \"15e98409-e3dd-404f-9686-6d8b74367b2d\") "
Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.969177 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e98409-e3dd-404f-9686-6d8b74367b2d-utilities\") pod \"15e98409-e3dd-404f-9686-6d8b74367b2d\" (UID: \"15e98409-e3dd-404f-9686-6d8b74367b2d\") "
Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.970337 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15e98409-e3dd-404f-9686-6d8b74367b2d-utilities" (OuterVolumeSpecName: "utilities") pod "15e98409-e3dd-404f-9686-6d8b74367b2d" (UID: "15e98409-e3dd-404f-9686-6d8b74367b2d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.981125 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15e98409-e3dd-404f-9686-6d8b74367b2d-kube-api-access-w2t98" (OuterVolumeSpecName: "kube-api-access-w2t98") pod "15e98409-e3dd-404f-9686-6d8b74367b2d" (UID: "15e98409-e3dd-404f-9686-6d8b74367b2d"). InnerVolumeSpecName "kube-api-access-w2t98". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.989407 4938 generic.go:334] "Generic (PLEG): container finished" podID="15e98409-e3dd-404f-9686-6d8b74367b2d" containerID="fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027" exitCode=0
Need to start a new one" pod="openshift-marketplace/certified-operators-kjx4c" Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.989522 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjx4c" event={"ID":"15e98409-e3dd-404f-9686-6d8b74367b2d","Type":"ContainerDied","Data":"fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027"} Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.989623 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kjx4c" event={"ID":"15e98409-e3dd-404f-9686-6d8b74367b2d","Type":"ContainerDied","Data":"fbc4458729000e97ba504fee3ba563d35ea7488e7111e92445885f198a778be7"} Nov 22 10:53:30 crc kubenswrapper[4938]: I1122 10:53:30.989713 4938 scope.go:117] "RemoveContainer" containerID="fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.007430 4938 scope.go:117] "RemoveContainer" containerID="24ada195a4c81d6a45ee3bc1caf05855590cda9d35c81112f06591abed26c4f7" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.020665 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15e98409-e3dd-404f-9686-6d8b74367b2d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "15e98409-e3dd-404f-9686-6d8b74367b2d" (UID: "15e98409-e3dd-404f-9686-6d8b74367b2d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.027078 4938 scope.go:117] "RemoveContainer" containerID="607c394baaa089d11985091a62583b67e7cb3a0923ba53ff6351ba61bcdf61f7" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.043109 4938 scope.go:117] "RemoveContainer" containerID="fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027" Nov 22 10:53:31 crc kubenswrapper[4938]: E1122 10:53:31.043569 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027\": container with ID starting with fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027 not found: ID does not exist" containerID="fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.043597 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027"} err="failed to get container status \"fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027\": rpc error: code = NotFound desc = could not find container \"fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027\": container with ID starting with fce5359769c8849a71abcf06cda4eb59ba3ebe5ee414a6f64f75d757a7f23027 not found: ID does not exist" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.043619 4938 scope.go:117] "RemoveContainer" containerID="24ada195a4c81d6a45ee3bc1caf05855590cda9d35c81112f06591abed26c4f7" Nov 22 10:53:31 crc kubenswrapper[4938]: E1122 10:53:31.044014 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24ada195a4c81d6a45ee3bc1caf05855590cda9d35c81112f06591abed26c4f7\": container with ID starting with 24ada195a4c81d6a45ee3bc1caf05855590cda9d35c81112f06591abed26c4f7 not found: ID does not exist" 
containerID="24ada195a4c81d6a45ee3bc1caf05855590cda9d35c81112f06591abed26c4f7" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.044124 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24ada195a4c81d6a45ee3bc1caf05855590cda9d35c81112f06591abed26c4f7"} err="failed to get container status \"24ada195a4c81d6a45ee3bc1caf05855590cda9d35c81112f06591abed26c4f7\": rpc error: code = NotFound desc = could not find container \"24ada195a4c81d6a45ee3bc1caf05855590cda9d35c81112f06591abed26c4f7\": container with ID starting with 24ada195a4c81d6a45ee3bc1caf05855590cda9d35c81112f06591abed26c4f7 not found: ID does not exist" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.044203 4938 scope.go:117] "RemoveContainer" containerID="607c394baaa089d11985091a62583b67e7cb3a0923ba53ff6351ba61bcdf61f7" Nov 22 10:53:31 crc kubenswrapper[4938]: E1122 10:53:31.044673 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"607c394baaa089d11985091a62583b67e7cb3a0923ba53ff6351ba61bcdf61f7\": container with ID starting with 607c394baaa089d11985091a62583b67e7cb3a0923ba53ff6351ba61bcdf61f7 not found: ID does not exist" containerID="607c394baaa089d11985091a62583b67e7cb3a0923ba53ff6351ba61bcdf61f7" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.044752 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"607c394baaa089d11985091a62583b67e7cb3a0923ba53ff6351ba61bcdf61f7"} err="failed to get container status \"607c394baaa089d11985091a62583b67e7cb3a0923ba53ff6351ba61bcdf61f7\": rpc error: code = NotFound desc = could not find container \"607c394baaa089d11985091a62583b67e7cb3a0923ba53ff6351ba61bcdf61f7\": container with ID starting with 607c394baaa089d11985091a62583b67e7cb3a0923ba53ff6351ba61bcdf61f7 not found: ID does not exist" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.070849 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2t98\" (UniqueName: \"kubernetes.io/projected/15e98409-e3dd-404f-9686-6d8b74367b2d-kube-api-access-w2t98\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.071179 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e98409-e3dd-404f-9686-6d8b74367b2d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.071268 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e98409-e3dd-404f-9686-6d8b74367b2d-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.316889 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kjx4c"] Nov 22 10:53:31 crc kubenswrapper[4938]: I1122 10:53:31.320153 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kjx4c"] Nov 22 10:53:32 crc kubenswrapper[4938]: I1122 10:53:32.457260 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15e98409-e3dd-404f-9686-6d8b74367b2d" path="/var/lib/kubelet/pods/15e98409-e3dd-404f-9686-6d8b74367b2d/volumes" Nov 22 10:53:38 crc kubenswrapper[4938]: I1122 10:53:38.739042 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-j4l4j" Nov 22 10:53:38 crc kubenswrapper[4938]: I1122 
Nov 22 10:53:38 crc kubenswrapper[4938]: I1122 10:53:38.739677 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-j4l4j"
Nov 22 10:53:38 crc kubenswrapper[4938]: I1122 10:53:38.763602 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-j4l4j"
Nov 22 10:53:39 crc kubenswrapper[4938]: I1122 10:53:39.051063 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-j4l4j"
Nov 22 10:53:41 crc kubenswrapper[4938]: I1122 10:53:41.300837 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 10:53:41 crc kubenswrapper[4938]: I1122 10:53:41.301164 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.855662 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c"]
Nov 22 10:53:53 crc kubenswrapper[4938]: E1122 10:53:53.856362 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e98409-e3dd-404f-9686-6d8b74367b2d" containerName="registry-server"
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.856373 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e98409-e3dd-404f-9686-6d8b74367b2d" containerName="registry-server"
Nov 22 10:53:53 crc kubenswrapper[4938]: E1122 10:53:53.856383 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e9e8fa5-de72-4a69-82e5-8037808b43c9" containerName="registry-server"
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.856390 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e9e8fa5-de72-4a69-82e5-8037808b43c9" containerName="registry-server"
Nov 22 10:53:53 crc kubenswrapper[4938]: E1122 10:53:53.856412 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e98409-e3dd-404f-9686-6d8b74367b2d" containerName="extract-utilities"
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.856419 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e98409-e3dd-404f-9686-6d8b74367b2d" containerName="extract-utilities"
Nov 22 10:53:53 crc kubenswrapper[4938]: E1122 10:53:53.856428 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e98409-e3dd-404f-9686-6d8b74367b2d" containerName="extract-content"
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.856434 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e98409-e3dd-404f-9686-6d8b74367b2d" containerName="extract-content"
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.856532 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e9e8fa5-de72-4a69-82e5-8037808b43c9" containerName="registry-server"
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.856547 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="15e98409-e3dd-404f-9686-6d8b74367b2d" containerName="registry-server"
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.857476 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c"
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.859332 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-h5trs"
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.861447 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c"]
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.994165 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pddll\" (UniqueName: \"kubernetes.io/projected/04a11c92-d0c4-462d-ad17-c81256ce817c-kube-api-access-pddll\") pod \"1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c\" (UID: \"04a11c92-d0c4-462d-ad17-c81256ce817c\") " pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c"
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.994451 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04a11c92-d0c4-462d-ad17-c81256ce817c-bundle\") pod \"1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c\" (UID: \"04a11c92-d0c4-462d-ad17-c81256ce817c\") " pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c"
Nov 22 10:53:53 crc kubenswrapper[4938]: I1122 10:53:53.994600 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04a11c92-d0c4-462d-ad17-c81256ce817c-util\") pod \"1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c\" (UID: \"04a11c92-d0c4-462d-ad17-c81256ce817c\") " pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c"
Nov 22 10:53:54 crc kubenswrapper[4938]: I1122 10:53:54.096149 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pddll\" (UniqueName: \"kubernetes.io/projected/04a11c92-d0c4-462d-ad17-c81256ce817c-kube-api-access-pddll\") pod \"1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c\" (UID: \"04a11c92-d0c4-462d-ad17-c81256ce817c\") " pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c"
Nov 22 10:53:54 crc kubenswrapper[4938]: I1122 10:53:54.096274 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04a11c92-d0c4-462d-ad17-c81256ce817c-bundle\") pod \"1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c\" (UID: \"04a11c92-d0c4-462d-ad17-c81256ce817c\") " pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c"
Nov 22 10:53:54 crc kubenswrapper[4938]: I1122 10:53:54.096372 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04a11c92-d0c4-462d-ad17-c81256ce817c-util\") pod \"1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c\" (UID: \"04a11c92-d0c4-462d-ad17-c81256ce817c\") " pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c"
\"kubernetes.io/empty-dir/04a11c92-d0c4-462d-ad17-c81256ce817c-bundle\") pod \"1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c\" (UID: \"04a11c92-d0c4-462d-ad17-c81256ce817c\") " pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c" Nov 22 10:53:54 crc kubenswrapper[4938]: I1122 10:53:54.096833 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04a11c92-d0c4-462d-ad17-c81256ce817c-util\") pod \"1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c\" (UID: \"04a11c92-d0c4-462d-ad17-c81256ce817c\") " pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c" Nov 22 10:53:54 crc kubenswrapper[4938]: I1122 10:53:54.114310 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pddll\" (UniqueName: \"kubernetes.io/projected/04a11c92-d0c4-462d-ad17-c81256ce817c-kube-api-access-pddll\") pod \"1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c\" (UID: \"04a11c92-d0c4-462d-ad17-c81256ce817c\") " pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c" Nov 22 10:53:54 crc kubenswrapper[4938]: I1122 10:53:54.204755 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c" Nov 22 10:53:54 crc kubenswrapper[4938]: I1122 10:53:54.613273 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c"] Nov 22 10:53:55 crc kubenswrapper[4938]: I1122 10:53:55.122267 4938 generic.go:334] "Generic (PLEG): container finished" podID="04a11c92-d0c4-462d-ad17-c81256ce817c" containerID="822243d8d8674a9beb15a19660ca250b5fbf6551e40679d1eba515fca6de76fc" exitCode=0 Nov 22 10:53:55 crc kubenswrapper[4938]: I1122 10:53:55.123023 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c" event={"ID":"04a11c92-d0c4-462d-ad17-c81256ce817c","Type":"ContainerDied","Data":"822243d8d8674a9beb15a19660ca250b5fbf6551e40679d1eba515fca6de76fc"} Nov 22 10:53:55 crc kubenswrapper[4938]: I1122 10:53:55.123071 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c" event={"ID":"04a11c92-d0c4-462d-ad17-c81256ce817c","Type":"ContainerStarted","Data":"196784511e25724c9dcfd47454bbde30f53967978364e982b3da54de700e09c2"} Nov 22 10:53:56 crc kubenswrapper[4938]: I1122 10:53:56.130791 4938 generic.go:334] "Generic (PLEG): container finished" podID="04a11c92-d0c4-462d-ad17-c81256ce817c" containerID="f4c84dbc9c497971c7e36fe07c7f50522d2268438780d894f5ecb450b8c74498" exitCode=0 Nov 22 10:53:56 crc kubenswrapper[4938]: I1122 10:53:56.130828 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c" event={"ID":"04a11c92-d0c4-462d-ad17-c81256ce817c","Type":"ContainerDied","Data":"f4c84dbc9c497971c7e36fe07c7f50522d2268438780d894f5ecb450b8c74498"} Nov 22 10:53:57 crc kubenswrapper[4938]: I1122 10:53:57.139234 4938 generic.go:334] "Generic (PLEG): container finished" podID="04a11c92-d0c4-462d-ad17-c81256ce817c" containerID="bc45716b2b1b31a71fa49663e6df65a88e415bfefa1af29a2a9de7081dac028e" exitCode=0 Nov 22 10:53:57 crc kubenswrapper[4938]: I1122 10:53:57.139301 4938 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c" event={"ID":"04a11c92-d0c4-462d-ad17-c81256ce817c","Type":"ContainerDied","Data":"bc45716b2b1b31a71fa49663e6df65a88e415bfefa1af29a2a9de7081dac028e"} Nov 22 10:53:58 crc kubenswrapper[4938]: I1122 10:53:58.360397 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c" Nov 22 10:53:58 crc kubenswrapper[4938]: I1122 10:53:58.555016 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04a11c92-d0c4-462d-ad17-c81256ce817c-util\") pod \"04a11c92-d0c4-462d-ad17-c81256ce817c\" (UID: \"04a11c92-d0c4-462d-ad17-c81256ce817c\") " Nov 22 10:53:58 crc kubenswrapper[4938]: I1122 10:53:58.555087 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04a11c92-d0c4-462d-ad17-c81256ce817c-bundle\") pod \"04a11c92-d0c4-462d-ad17-c81256ce817c\" (UID: \"04a11c92-d0c4-462d-ad17-c81256ce817c\") " Nov 22 10:53:58 crc kubenswrapper[4938]: I1122 10:53:58.555170 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pddll\" (UniqueName: \"kubernetes.io/projected/04a11c92-d0c4-462d-ad17-c81256ce817c-kube-api-access-pddll\") pod \"04a11c92-d0c4-462d-ad17-c81256ce817c\" (UID: \"04a11c92-d0c4-462d-ad17-c81256ce817c\") " Nov 22 10:53:58 crc kubenswrapper[4938]: I1122 10:53:58.556166 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04a11c92-d0c4-462d-ad17-c81256ce817c-bundle" (OuterVolumeSpecName: "bundle") pod "04a11c92-d0c4-462d-ad17-c81256ce817c" (UID: "04a11c92-d0c4-462d-ad17-c81256ce817c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:53:58 crc kubenswrapper[4938]: I1122 10:53:58.561016 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04a11c92-d0c4-462d-ad17-c81256ce817c-kube-api-access-pddll" (OuterVolumeSpecName: "kube-api-access-pddll") pod "04a11c92-d0c4-462d-ad17-c81256ce817c" (UID: "04a11c92-d0c4-462d-ad17-c81256ce817c"). InnerVolumeSpecName "kube-api-access-pddll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:53:58 crc kubenswrapper[4938]: I1122 10:53:58.570828 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04a11c92-d0c4-462d-ad17-c81256ce817c-util" (OuterVolumeSpecName: "util") pod "04a11c92-d0c4-462d-ad17-c81256ce817c" (UID: "04a11c92-d0c4-462d-ad17-c81256ce817c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:53:58 crc kubenswrapper[4938]: I1122 10:53:58.656403 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pddll\" (UniqueName: \"kubernetes.io/projected/04a11c92-d0c4-462d-ad17-c81256ce817c-kube-api-access-pddll\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:58 crc kubenswrapper[4938]: I1122 10:53:58.656435 4938 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04a11c92-d0c4-462d-ad17-c81256ce817c-util\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:58 crc kubenswrapper[4938]: I1122 10:53:58.656444 4938 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04a11c92-d0c4-462d-ad17-c81256ce817c-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:59 crc kubenswrapper[4938]: I1122 10:53:59.156459 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c" event={"ID":"04a11c92-d0c4-462d-ad17-c81256ce817c","Type":"ContainerDied","Data":"196784511e25724c9dcfd47454bbde30f53967978364e982b3da54de700e09c2"} Nov 22 10:53:59 crc kubenswrapper[4938]: I1122 10:53:59.156790 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="196784511e25724c9dcfd47454bbde30f53967978364e982b3da54de700e09c2" Nov 22 10:53:59 crc kubenswrapper[4938]: I1122 10:53:59.156894 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c" Nov 22 10:54:02 crc kubenswrapper[4938]: I1122 10:54:02.607538 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2"] Nov 22 10:54:02 crc kubenswrapper[4938]: E1122 10:54:02.608092 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04a11c92-d0c4-462d-ad17-c81256ce817c" containerName="extract" Nov 22 10:54:02 crc kubenswrapper[4938]: I1122 10:54:02.608105 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="04a11c92-d0c4-462d-ad17-c81256ce817c" containerName="extract" Nov 22 10:54:02 crc kubenswrapper[4938]: E1122 10:54:02.608121 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04a11c92-d0c4-462d-ad17-c81256ce817c" containerName="util" Nov 22 10:54:02 crc kubenswrapper[4938]: I1122 10:54:02.608127 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="04a11c92-d0c4-462d-ad17-c81256ce817c" containerName="util" Nov 22 10:54:02 crc kubenswrapper[4938]: E1122 10:54:02.608141 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04a11c92-d0c4-462d-ad17-c81256ce817c" containerName="pull" Nov 22 10:54:02 crc kubenswrapper[4938]: I1122 10:54:02.608148 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="04a11c92-d0c4-462d-ad17-c81256ce817c" containerName="pull" Nov 22 10:54:02 crc kubenswrapper[4938]: I1122 10:54:02.608279 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="04a11c92-d0c4-462d-ad17-c81256ce817c" containerName="extract" Nov 22 10:54:02 crc kubenswrapper[4938]: I1122 10:54:02.608823 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2" Nov 22 10:54:02 crc kubenswrapper[4938]: I1122 10:54:02.615084 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-g2dcg" Nov 22 10:54:02 crc kubenswrapper[4938]: I1122 10:54:02.637195 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52d8v\" (UniqueName: \"kubernetes.io/projected/f4501035-7ea2-41f7-a3d4-12ab72d52a0c-kube-api-access-52d8v\") pod \"openstack-operator-controller-operator-6d45d44995-nrhm2\" (UID: \"f4501035-7ea2-41f7-a3d4-12ab72d52a0c\") " pod="openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2" Nov 22 10:54:02 crc kubenswrapper[4938]: I1122 10:54:02.707885 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2"] Nov 22 10:54:02 crc kubenswrapper[4938]: I1122 10:54:02.737897 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52d8v\" (UniqueName: \"kubernetes.io/projected/f4501035-7ea2-41f7-a3d4-12ab72d52a0c-kube-api-access-52d8v\") pod \"openstack-operator-controller-operator-6d45d44995-nrhm2\" (UID: \"f4501035-7ea2-41f7-a3d4-12ab72d52a0c\") " pod="openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2" Nov 22 10:54:02 crc kubenswrapper[4938]: I1122 10:54:02.763900 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52d8v\" (UniqueName: \"kubernetes.io/projected/f4501035-7ea2-41f7-a3d4-12ab72d52a0c-kube-api-access-52d8v\") pod \"openstack-operator-controller-operator-6d45d44995-nrhm2\" (UID: \"f4501035-7ea2-41f7-a3d4-12ab72d52a0c\") " pod="openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2" Nov 22 10:54:02 crc kubenswrapper[4938]: I1122 10:54:02.927741 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2" Nov 22 10:54:03 crc kubenswrapper[4938]: I1122 10:54:03.373148 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2"] Nov 22 10:54:04 crc kubenswrapper[4938]: I1122 10:54:04.187867 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2" event={"ID":"f4501035-7ea2-41f7-a3d4-12ab72d52a0c","Type":"ContainerStarted","Data":"9a434b5008e91a937be84fe96de0cd2136beca7d93a0f40a6ad6db276fed5495"} Nov 22 10:54:08 crc kubenswrapper[4938]: I1122 10:54:08.223457 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2" event={"ID":"f4501035-7ea2-41f7-a3d4-12ab72d52a0c","Type":"ContainerStarted","Data":"a1a341a0051c7f571fe8e2aca5fa05f3c765482bded18e2d7e0b2f7a5f602c0d"} Nov 22 10:54:10 crc kubenswrapper[4938]: I1122 10:54:10.246310 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2" event={"ID":"f4501035-7ea2-41f7-a3d4-12ab72d52a0c","Type":"ContainerStarted","Data":"234fc0ea4d0fdc4561273e772393ebc3d12a33dbc414c9c2236466249da3d57e"} Nov 22 10:54:10 crc kubenswrapper[4938]: I1122 10:54:10.246666 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2" Nov 22 10:54:10 crc kubenswrapper[4938]: I1122 10:54:10.283710 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2" podStartSLOduration=2.188595035 podStartE2EDuration="8.283695638s" podCreationTimestamp="2025-11-22 10:54:02 +0000 UTC" firstStartedPulling="2025-11-22 10:54:03.380971687 +0000 UTC m=+975.848809086" lastFinishedPulling="2025-11-22 10:54:09.47607229 +0000 UTC m=+981.943909689" observedRunningTime="2025-11-22 10:54:10.28141738 +0000 UTC m=+982.749254789" watchObservedRunningTime="2025-11-22 10:54:10.283695638 +0000 UTC m=+982.751533027" Nov 22 10:54:11 crc kubenswrapper[4938]: I1122 10:54:11.300695 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:54:11 crc kubenswrapper[4938]: I1122 10:54:11.301542 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:54:11 crc kubenswrapper[4938]: I1122 10:54:11.301668 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:54:11 crc kubenswrapper[4938]: I1122 10:54:11.302383 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9bf1ca56eb9b5ca54774b2ff22753d6d20a7c9a6e4ea3d50501b2ce9692054fe"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" 
containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 10:54:11 crc kubenswrapper[4938]: I1122 10:54:11.302546 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://9bf1ca56eb9b5ca54774b2ff22753d6d20a7c9a6e4ea3d50501b2ce9692054fe" gracePeriod=600 Nov 22 10:54:12 crc kubenswrapper[4938]: I1122 10:54:12.259768 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="9bf1ca56eb9b5ca54774b2ff22753d6d20a7c9a6e4ea3d50501b2ce9692054fe" exitCode=0 Nov 22 10:54:12 crc kubenswrapper[4938]: I1122 10:54:12.259848 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"9bf1ca56eb9b5ca54774b2ff22753d6d20a7c9a6e4ea3d50501b2ce9692054fe"} Nov 22 10:54:12 crc kubenswrapper[4938]: I1122 10:54:12.260338 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"83d06198e1a05493993f82688b4c5e788920b5b335d6cb139a79e3c5688ff404"} Nov 22 10:54:12 crc kubenswrapper[4938]: I1122 10:54:12.260358 4938 scope.go:117] "RemoveContainer" containerID="d2b3abdbe6f3e506231efa45f5e8c99ca4e0148edc203fafb1a364658f241e4f" Nov 22 10:54:12 crc kubenswrapper[4938]: I1122 10:54:12.930896 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-6d45d44995-nrhm2" Nov 22 10:54:30 crc kubenswrapper[4938]: I1122 10:54:30.988370 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9"] Nov 22 10:54:30 crc kubenswrapper[4938]: I1122 10:54:30.989925 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9" Nov 22 10:54:30 crc kubenswrapper[4938]: I1122 10:54:30.991724 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc"] Nov 22 10:54:30 crc kubenswrapper[4938]: I1122 10:54:30.992611 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-d59bs" Nov 22 10:54:30 crc kubenswrapper[4938]: I1122 10:54:30.992674 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc" Nov 22 10:54:30 crc kubenswrapper[4938]: I1122 10:54:30.996619 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj"] Nov 22 10:54:30 crc kubenswrapper[4938]: I1122 10:54:30.997985 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj" Nov 22 10:54:30 crc kubenswrapper[4938]: I1122 10:54:30.998533 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-dr88d" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.000062 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.003537 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.011074 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-5w5jt" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.017808 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.044791 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.045719 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.047960 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-ldkbx" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.056975 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.058078 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.062373 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-cvwhv" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.073978 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.075776 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-th6kn\" (UniqueName: \"kubernetes.io/projected/cba844df-58bc-4d1e-989c-9eb4ccb036b6-kube-api-access-th6kn\") pod \"cinder-operator-controller-manager-748967c98-jhgfc\" (UID: \"cba844df-58bc-4d1e-989c-9eb4ccb036b6\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.075842 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lp2qd\" (UniqueName: \"kubernetes.io/projected/2415b98c-1a50-4f8d-b094-de51a90a0088-kube-api-access-lp2qd\") pod \"designate-operator-controller-manager-6788cc6d75-8b8mj\" (UID: \"2415b98c-1a50-4f8d-b094-de51a90a0088\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.075886 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llhgs\" (UniqueName: \"kubernetes.io/projected/dde3bedc-34b4-41e0-adba-78c802591de5-kube-api-access-llhgs\") pod \"barbican-operator-controller-manager-5bfbbb859d-mrpj9\" (UID: \"dde3bedc-34b4-41e0-adba-78c802591de5\") " pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.099729 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.119421 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.130488 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.144394 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-n885r" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.171208 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.176787 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsvzf\" (UniqueName: \"kubernetes.io/projected/496d9a6a-3979-43af-aa47-9161506bc8e9-kube-api-access-gsvzf\") pod \"heat-operator-controller-manager-698d6fd7d6-j8ftc\" (UID: \"496d9a6a-3979-43af-aa47-9161506bc8e9\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.176862 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-th6kn\" (UniqueName: \"kubernetes.io/projected/cba844df-58bc-4d1e-989c-9eb4ccb036b6-kube-api-access-th6kn\") pod \"cinder-operator-controller-manager-748967c98-jhgfc\" (UID: \"cba844df-58bc-4d1e-989c-9eb4ccb036b6\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.176890 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f6tv\" (UniqueName: \"kubernetes.io/projected/a8e3c6f8-4a77-4180-a67b-3dab37169c07-kube-api-access-4f6tv\") pod \"glance-operator-controller-manager-6f95d84fd6-s2d7b\" (UID: \"a8e3c6f8-4a77-4180-a67b-3dab37169c07\") " pod="openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.176929 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lp2qd\" (UniqueName: \"kubernetes.io/projected/2415b98c-1a50-4f8d-b094-de51a90a0088-kube-api-access-lp2qd\") pod \"designate-operator-controller-manager-6788cc6d75-8b8mj\" (UID: \"2415b98c-1a50-4f8d-b094-de51a90a0088\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.176954 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llhgs\" (UniqueName: \"kubernetes.io/projected/dde3bedc-34b4-41e0-adba-78c802591de5-kube-api-access-llhgs\") pod \"barbican-operator-controller-manager-5bfbbb859d-mrpj9\" (UID: \"dde3bedc-34b4-41e0-adba-78c802591de5\") " pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.184786 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.194728 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.200238 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.200473 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-p5lgt" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.204183 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.208619 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lp2qd\" (UniqueName: \"kubernetes.io/projected/2415b98c-1a50-4f8d-b094-de51a90a0088-kube-api-access-lp2qd\") pod \"designate-operator-controller-manager-6788cc6d75-8b8mj\" (UID: \"2415b98c-1a50-4f8d-b094-de51a90a0088\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.212647 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-th6kn\" (UniqueName: \"kubernetes.io/projected/cba844df-58bc-4d1e-989c-9eb4ccb036b6-kube-api-access-th6kn\") pod \"cinder-operator-controller-manager-748967c98-jhgfc\" (UID: \"cba844df-58bc-4d1e-989c-9eb4ccb036b6\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.221646 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llhgs\" (UniqueName: \"kubernetes.io/projected/dde3bedc-34b4-41e0-adba-78c802591de5-kube-api-access-llhgs\") pod \"barbican-operator-controller-manager-5bfbbb859d-mrpj9\" (UID: \"dde3bedc-34b4-41e0-adba-78c802591de5\") " pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.221729 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-tb846"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.223966 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-54485f899-tb846" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.226751 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-5hs55" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.249969 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.251034 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.257014 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-fkq5c" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.259963 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-tb846"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.273653 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.283828 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f6tv\" (UniqueName: \"kubernetes.io/projected/a8e3c6f8-4a77-4180-a67b-3dab37169c07-kube-api-access-4f6tv\") pod \"glance-operator-controller-manager-6f95d84fd6-s2d7b\" (UID: \"a8e3c6f8-4a77-4180-a67b-3dab37169c07\") " pod="openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.283882 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2d5b\" (UniqueName: \"kubernetes.io/projected/8659e26c-11d1-4a24-82e3-42e9737a54b8-kube-api-access-n2d5b\") pod \"horizon-operator-controller-manager-7d5d9fd47f-r9lgr\" (UID: \"8659e26c-11d1-4a24-82e3-42e9737a54b8\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.283939 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxpjl\" (UniqueName: \"kubernetes.io/projected/9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95-kube-api-access-hxpjl\") pod \"infra-operator-controller-manager-6c55d8d69b-z8ksz\" (UID: \"9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.283966 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsvzf\" (UniqueName: \"kubernetes.io/projected/496d9a6a-3979-43af-aa47-9161506bc8e9-kube-api-access-gsvzf\") pod \"heat-operator-controller-manager-698d6fd7d6-j8ftc\" (UID: \"496d9a6a-3979-43af-aa47-9161506bc8e9\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.283993 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-z8ksz\" (UID: \"9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.302715 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.303731 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.309293 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.310977 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-mrm5p" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.312222 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.313331 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.313579 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f6tv\" (UniqueName: \"kubernetes.io/projected/a8e3c6f8-4a77-4180-a67b-3dab37169c07-kube-api-access-4f6tv\") pod \"glance-operator-controller-manager-6f95d84fd6-s2d7b\" (UID: \"a8e3c6f8-4a77-4180-a67b-3dab37169c07\") " pod="openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.315579 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-h6lvd" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.318251 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.334543 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.352243 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.358527 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsvzf\" (UniqueName: \"kubernetes.io/projected/496d9a6a-3979-43af-aa47-9161506bc8e9-kube-api-access-gsvzf\") pod \"heat-operator-controller-manager-698d6fd7d6-j8ftc\" (UID: \"496d9a6a-3979-43af-aa47-9161506bc8e9\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.371078 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.377362 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.378442 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.379998 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.380702 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-v9pcc" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.385183 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxpjl\" (UniqueName: \"kubernetes.io/projected/9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95-kube-api-access-hxpjl\") pod \"infra-operator-controller-manager-6c55d8d69b-z8ksz\" (UID: \"9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.385289 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-z8ksz\" (UID: \"9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.385381 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7p5p\" (UniqueName: \"kubernetes.io/projected/fd298c00-9118-413b-bce4-1198393538fa-kube-api-access-m7p5p\") pod \"manila-operator-controller-manager-646fd589f9-gc4lw\" (UID: \"fd298c00-9118-413b-bce4-1198393538fa\") " pod="openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.385512 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8r6q\" (UniqueName: \"kubernetes.io/projected/c933df24-871e-4075-b48f-f8903914716b-kube-api-access-m8r6q\") pod \"mariadb-operator-controller-manager-64d7c556cd-qq5ww\" (UID: \"c933df24-871e-4075-b48f-f8903914716b\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.385614 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2d5b\" (UniqueName: \"kubernetes.io/projected/8659e26c-11d1-4a24-82e3-42e9737a54b8-kube-api-access-n2d5b\") pod \"horizon-operator-controller-manager-7d5d9fd47f-r9lgr\" (UID: \"8659e26c-11d1-4a24-82e3-42e9737a54b8\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.385658 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vnjs\" (UniqueName: \"kubernetes.io/projected/2a30b9b0-97ac-4268-8d85-193fa80c6b01-kube-api-access-8vnjs\") pod \"ironic-operator-controller-manager-54485f899-tb846\" (UID: \"2a30b9b0-97ac-4268-8d85-193fa80c6b01\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-tb846" Nov 22 10:54:31 crc kubenswrapper[4938]: E1122 10:54:31.386391 4938 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.386463 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q"] Nov 22 10:54:31 crc kubenswrapper[4938]: E1122 10:54:31.386516 
4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95-cert podName:9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95 nodeName:}" failed. No retries permitted until 2025-11-22 10:54:31.886421771 +0000 UTC m=+1004.354259170 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95-cert") pod "infra-operator-controller-manager-6c55d8d69b-z8ksz" (UID: "9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95") : secret "infra-operator-webhook-server-cert" not found Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.386868 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.394543 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkk5h\" (UniqueName: \"kubernetes.io/projected/65118a1f-ed5e-4354-8494-4df42ff6ae6a-kube-api-access-kkk5h\") pod \"keystone-operator-controller-manager-79cc9d59f5-gxvrt\" (UID: \"65118a1f-ed5e-4354-8494-4df42ff6ae6a\") " pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.400993 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.402345 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.404539 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-rwb6q" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.409329 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2d5b\" (UniqueName: \"kubernetes.io/projected/8659e26c-11d1-4a24-82e3-42e9737a54b8-kube-api-access-n2d5b\") pod \"horizon-operator-controller-manager-7d5d9fd47f-r9lgr\" (UID: \"8659e26c-11d1-4a24-82e3-42e9737a54b8\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.415831 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxpjl\" (UniqueName: \"kubernetes.io/projected/9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95-kube-api-access-hxpjl\") pod \"infra-operator-controller-manager-6c55d8d69b-z8ksz\" (UID: \"9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.425893 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.429516 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.439891 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-tkhsf" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.450938 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.457094 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.462554 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.470068 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.471721 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.478496 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-fgk9s" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.495390 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4v5f\" (UniqueName: \"kubernetes.io/projected/46844239-10fa-433c-bd82-565bf911989c-kube-api-access-j4v5f\") pod \"neutron-operator-controller-manager-58879495c-xjw2q\" (UID: \"46844239-10fa-433c-bd82-565bf911989c\") " pod="openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.495463 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7p5p\" (UniqueName: \"kubernetes.io/projected/fd298c00-9118-413b-bce4-1198393538fa-kube-api-access-m7p5p\") pod \"manila-operator-controller-manager-646fd589f9-gc4lw\" (UID: \"fd298c00-9118-413b-bce4-1198393538fa\") " pod="openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.495498 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp9wf\" (UniqueName: \"kubernetes.io/projected/ca599052-ab51-498f-882d-895854e272c4-kube-api-access-wp9wf\") pod \"nova-operator-controller-manager-79d658b66d-dzpvv\" (UID: \"ca599052-ab51-498f-882d-895854e272c4\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.495792 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.495925 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8r6q\" (UniqueName: \"kubernetes.io/projected/c933df24-871e-4075-b48f-f8903914716b-kube-api-access-m8r6q\") pod \"mariadb-operator-controller-manager-64d7c556cd-qq5ww\" (UID: \"c933df24-871e-4075-b48f-f8903914716b\") " 
pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.495980 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkk5h\" (UniqueName: \"kubernetes.io/projected/65118a1f-ed5e-4354-8494-4df42ff6ae6a-kube-api-access-kkk5h\") pod \"keystone-operator-controller-manager-79cc9d59f5-gxvrt\" (UID: \"65118a1f-ed5e-4354-8494-4df42ff6ae6a\") " pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.496004 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vnjs\" (UniqueName: \"kubernetes.io/projected/2a30b9b0-97ac-4268-8d85-193fa80c6b01-kube-api-access-8vnjs\") pod \"ironic-operator-controller-manager-54485f899-tb846\" (UID: \"2a30b9b0-97ac-4268-8d85-193fa80c6b01\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-tb846" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.496042 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fkzk\" (UniqueName: \"kubernetes.io/projected/1016b115-4617-4a19-a992-91dd5b124c9b-kube-api-access-9fkzk\") pod \"octavia-operator-controller-manager-d5fb87cb8-qps9n\" (UID: \"1016b115-4617-4a19-a992-91dd5b124c9b\") " pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.501684 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-98nqr"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.502889 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.508621 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-m28m4" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.516754 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vnjs\" (UniqueName: \"kubernetes.io/projected/2a30b9b0-97ac-4268-8d85-193fa80c6b01-kube-api-access-8vnjs\") pod \"ironic-operator-controller-manager-54485f899-tb846\" (UID: \"2a30b9b0-97ac-4268-8d85-193fa80c6b01\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-tb846" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.518579 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkk5h\" (UniqueName: \"kubernetes.io/projected/65118a1f-ed5e-4354-8494-4df42ff6ae6a-kube-api-access-kkk5h\") pod \"keystone-operator-controller-manager-79cc9d59f5-gxvrt\" (UID: \"65118a1f-ed5e-4354-8494-4df42ff6ae6a\") " pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.519498 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7p5p\" (UniqueName: \"kubernetes.io/projected/fd298c00-9118-413b-bce4-1198393538fa-kube-api-access-m7p5p\") pod \"manila-operator-controller-manager-646fd589f9-gc4lw\" (UID: \"fd298c00-9118-413b-bce4-1198393538fa\") " pod="openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.530863 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8r6q\" (UniqueName: \"kubernetes.io/projected/c933df24-871e-4075-b48f-f8903914716b-kube-api-access-m8r6q\") pod \"mariadb-operator-controller-manager-64d7c556cd-qq5ww\" (UID: \"c933df24-871e-4075-b48f-f8903914716b\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.540331 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.543210 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.550506 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-9zjgn" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.568110 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.569124 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.571229 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.571421 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-hwzvx" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.573371 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-98nqr"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.594538 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.600014 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fkzk\" (UniqueName: \"kubernetes.io/projected/1016b115-4617-4a19-a992-91dd5b124c9b-kube-api-access-9fkzk\") pod \"octavia-operator-controller-manager-d5fb87cb8-qps9n\" (UID: \"1016b115-4617-4a19-a992-91dd5b124c9b\") " pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.600067 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8wlw\" (UniqueName: \"kubernetes.io/projected/ee7a691c-6232-4e30-b1bf-400c65b8b127-kube-api-access-m8wlw\") pod \"ovn-operator-controller-manager-5b67cfc8fb-dx2bq\" (UID: \"ee7a691c-6232-4e30-b1bf-400c65b8b127\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.600118 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4v5f\" (UniqueName: \"kubernetes.io/projected/46844239-10fa-433c-bd82-565bf911989c-kube-api-access-j4v5f\") pod \"neutron-operator-controller-manager-58879495c-xjw2q\" (UID: \"46844239-10fa-433c-bd82-565bf911989c\") " pod="openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.600159 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7zqw\" (UniqueName: \"kubernetes.io/projected/454affdc-b63c-4696-914f-f2abbf7896ca-kube-api-access-r7zqw\") pod \"placement-operator-controller-manager-867d87977b-98nqr\" (UID: \"454affdc-b63c-4696-914f-f2abbf7896ca\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.600189 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wp9wf\" (UniqueName: \"kubernetes.io/projected/ca599052-ab51-498f-882d-895854e272c4-kube-api-access-wp9wf\") pod \"nova-operator-controller-manager-79d658b66d-dzpvv\" (UID: \"ca599052-ab51-498f-882d-895854e272c4\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.606781 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.612723 4938 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-54485f899-tb846" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.631637 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-695797c565-v457z"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.633831 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.637136 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-tqt7x" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.637826 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-695797c565-v457z"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.641661 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4v5f\" (UniqueName: \"kubernetes.io/projected/46844239-10fa-433c-bd82-565bf911989c-kube-api-access-j4v5f\") pod \"neutron-operator-controller-manager-58879495c-xjw2q\" (UID: \"46844239-10fa-433c-bd82-565bf911989c\") " pod="openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.642012 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wp9wf\" (UniqueName: \"kubernetes.io/projected/ca599052-ab51-498f-882d-895854e272c4-kube-api-access-wp9wf\") pod \"nova-operator-controller-manager-79d658b66d-dzpvv\" (UID: \"ca599052-ab51-498f-882d-895854e272c4\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.651269 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fkzk\" (UniqueName: \"kubernetes.io/projected/1016b115-4617-4a19-a992-91dd5b124c9b-kube-api-access-9fkzk\") pod \"octavia-operator-controller-manager-d5fb87cb8-qps9n\" (UID: \"1016b115-4617-4a19-a992-91dd5b124c9b\") " pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.660389 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.661533 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.663671 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-kg9jd" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.687712 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.691852 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.701115 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7zqw\" (UniqueName: \"kubernetes.io/projected/454affdc-b63c-4696-914f-f2abbf7896ca-kube-api-access-r7zqw\") pod \"placement-operator-controller-manager-867d87977b-98nqr\" (UID: \"454affdc-b63c-4696-914f-f2abbf7896ca\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.701157 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xvbl\" (UniqueName: \"kubernetes.io/projected/7e8f66c5-67cb-428e-bc4d-9e6e893af682-kube-api-access-9xvbl\") pod \"openstack-baremetal-operator-controller-manager-77868f484-2l2nm\" (UID: \"7e8f66c5-67cb-428e-bc4d-9e6e893af682\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.701200 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccfb4\" (UniqueName: \"kubernetes.io/projected/dbade39a-90d4-49d8-96cc-0a5175783ac1-kube-api-access-ccfb4\") pod \"telemetry-operator-controller-manager-695797c565-v457z\" (UID: \"dbade39a-90d4-49d8-96cc-0a5175783ac1\") " pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.701215 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e8f66c5-67cb-428e-bc4d-9e6e893af682-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-2l2nm\" (UID: \"7e8f66c5-67cb-428e-bc4d-9e6e893af682\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.701252 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8wlw\" (UniqueName: \"kubernetes.io/projected/ee7a691c-6232-4e30-b1bf-400c65b8b127-kube-api-access-m8wlw\") pod \"ovn-operator-controller-manager-5b67cfc8fb-dx2bq\" (UID: \"ee7a691c-6232-4e30-b1bf-400c65b8b127\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.701285 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhm4m\" (UniqueName: \"kubernetes.io/projected/6ceba7c3-c04c-4449-9788-ed341bdaceb7-kube-api-access-xhm4m\") pod \"swift-operator-controller-manager-8f6687c44-5r8n9\" (UID: \"6ceba7c3-c04c-4449-9788-ed341bdaceb7\") " pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.708318 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.734446 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.734769 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.738947 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.739134 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.740312 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.741683 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-4vh2c" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.758697 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7zqw\" (UniqueName: \"kubernetes.io/projected/454affdc-b63c-4696-914f-f2abbf7896ca-kube-api-access-r7zqw\") pod \"placement-operator-controller-manager-867d87977b-98nqr\" (UID: \"454affdc-b63c-4696-914f-f2abbf7896ca\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.763835 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.765166 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.766730 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.772665 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8wlw\" (UniqueName: \"kubernetes.io/projected/ee7a691c-6232-4e30-b1bf-400c65b8b127-kube-api-access-m8wlw\") pod \"ovn-operator-controller-manager-5b67cfc8fb-dx2bq\" (UID: \"ee7a691c-6232-4e30-b1bf-400c65b8b127\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.773668 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-x84kv" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.774052 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.781015 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.822131 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xvbl\" (UniqueName: \"kubernetes.io/projected/7e8f66c5-67cb-428e-bc4d-9e6e893af682-kube-api-access-9xvbl\") pod \"openstack-baremetal-operator-controller-manager-77868f484-2l2nm\" (UID: \"7e8f66c5-67cb-428e-bc4d-9e6e893af682\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.822675 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccfb4\" (UniqueName: \"kubernetes.io/projected/dbade39a-90d4-49d8-96cc-0a5175783ac1-kube-api-access-ccfb4\") pod \"telemetry-operator-controller-manager-695797c565-v457z\" (UID: \"dbade39a-90d4-49d8-96cc-0a5175783ac1\") " pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.822804 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e8f66c5-67cb-428e-bc4d-9e6e893af682-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-2l2nm\" (UID: \"7e8f66c5-67cb-428e-bc4d-9e6e893af682\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.822900 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndddv\" (UniqueName: \"kubernetes.io/projected/584df814-c2c1-4566-a8d0-930b14020095-kube-api-access-ndddv\") pod \"test-operator-controller-manager-77db6bf9c-s5pww\" (UID: \"584df814-c2c1-4566-a8d0-930b14020095\") " pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.822997 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghwjl\" (UniqueName: \"kubernetes.io/projected/98d39c17-a9b0-483d-b170-eb006b5ee4b9-kube-api-access-ghwjl\") pod \"watcher-operator-controller-manager-6b56b8849f-4rmnr\" (UID: \"98d39c17-a9b0-483d-b170-eb006b5ee4b9\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.823119 4938 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhm4m\" (UniqueName: \"kubernetes.io/projected/6ceba7c3-c04c-4449-9788-ed341bdaceb7-kube-api-access-xhm4m\") pod \"swift-operator-controller-manager-8f6687c44-5r8n9\" (UID: \"6ceba7c3-c04c-4449-9788-ed341bdaceb7\") " pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" Nov 22 10:54:31 crc kubenswrapper[4938]: E1122 10:54:31.823583 4938 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 22 10:54:31 crc kubenswrapper[4938]: E1122 10:54:31.823701 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7e8f66c5-67cb-428e-bc4d-9e6e893af682-cert podName:7e8f66c5-67cb-428e-bc4d-9e6e893af682 nodeName:}" failed. No retries permitted until 2025-11-22 10:54:32.323687875 +0000 UTC m=+1004.791525264 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7e8f66c5-67cb-428e-bc4d-9e6e893af682-cert") pod "openstack-baremetal-operator-controller-manager-77868f484-2l2nm" (UID: "7e8f66c5-67cb-428e-bc4d-9e6e893af682") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.824056 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.833359 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.834355 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.842885 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.846144 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-64cpd" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.859144 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xvbl\" (UniqueName: \"kubernetes.io/projected/7e8f66c5-67cb-428e-bc4d-9e6e893af682-kube-api-access-9xvbl\") pod \"openstack-baremetal-operator-controller-manager-77868f484-2l2nm\" (UID: \"7e8f66c5-67cb-428e-bc4d-9e6e893af682\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.859288 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccfb4\" (UniqueName: \"kubernetes.io/projected/dbade39a-90d4-49d8-96cc-0a5175783ac1-kube-api-access-ccfb4\") pod \"telemetry-operator-controller-manager-695797c565-v457z\" (UID: \"dbade39a-90d4-49d8-96cc-0a5175783ac1\") " pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.861487 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.861595 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p"] Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.881059 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhm4m\" (UniqueName: \"kubernetes.io/projected/6ceba7c3-c04c-4449-9788-ed341bdaceb7-kube-api-access-xhm4m\") pod \"swift-operator-controller-manager-8f6687c44-5r8n9\" (UID: \"6ceba7c3-c04c-4449-9788-ed341bdaceb7\") " pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.924373 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzv85\" (UniqueName: \"kubernetes.io/projected/27df649b-2572-42d7-a137-6a82a01c482a-kube-api-access-dzv85\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p\" (UID: \"27df649b-2572-42d7-a137-6a82a01c482a\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.924464 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mg2n\" (UniqueName: \"kubernetes.io/projected/7fc10dd9-2ded-4a21-badc-6e8bd9615dd1-kube-api-access-9mg2n\") pod \"openstack-operator-controller-manager-7f4bc68b84-bqqvf\" (UID: \"7fc10dd9-2ded-4a21-badc-6e8bd9615dd1\") " pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.924492 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndddv\" (UniqueName: \"kubernetes.io/projected/584df814-c2c1-4566-a8d0-930b14020095-kube-api-access-ndddv\") pod \"test-operator-controller-manager-77db6bf9c-s5pww\" (UID: \"584df814-c2c1-4566-a8d0-930b14020095\") " pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.924516 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghwjl\" (UniqueName: \"kubernetes.io/projected/98d39c17-a9b0-483d-b170-eb006b5ee4b9-kube-api-access-ghwjl\") pod \"watcher-operator-controller-manager-6b56b8849f-4rmnr\" (UID: \"98d39c17-a9b0-483d-b170-eb006b5ee4b9\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.924546 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7fc10dd9-2ded-4a21-badc-6e8bd9615dd1-cert\") pod \"openstack-operator-controller-manager-7f4bc68b84-bqqvf\" (UID: \"7fc10dd9-2ded-4a21-badc-6e8bd9615dd1\") " pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.924597 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-z8ksz\" (UID: \"9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.948450 
4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.949425 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-z8ksz\" (UID: \"9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.954104 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghwjl\" (UniqueName: \"kubernetes.io/projected/98d39c17-a9b0-483d-b170-eb006b5ee4b9-kube-api-access-ghwjl\") pod \"watcher-operator-controller-manager-6b56b8849f-4rmnr\" (UID: \"98d39c17-a9b0-483d-b170-eb006b5ee4b9\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr" Nov 22 10:54:31 crc kubenswrapper[4938]: I1122 10:54:31.965416 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndddv\" (UniqueName: \"kubernetes.io/projected/584df814-c2c1-4566-a8d0-930b14020095-kube-api-access-ndddv\") pod \"test-operator-controller-manager-77db6bf9c-s5pww\" (UID: \"584df814-c2c1-4566-a8d0-930b14020095\") " pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.016608 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.026897 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mg2n\" (UniqueName: \"kubernetes.io/projected/7fc10dd9-2ded-4a21-badc-6e8bd9615dd1-kube-api-access-9mg2n\") pod \"openstack-operator-controller-manager-7f4bc68b84-bqqvf\" (UID: \"7fc10dd9-2ded-4a21-badc-6e8bd9615dd1\") " pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.026966 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7fc10dd9-2ded-4a21-badc-6e8bd9615dd1-cert\") pod \"openstack-operator-controller-manager-7f4bc68b84-bqqvf\" (UID: \"7fc10dd9-2ded-4a21-badc-6e8bd9615dd1\") " pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.027036 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzv85\" (UniqueName: \"kubernetes.io/projected/27df649b-2572-42d7-a137-6a82a01c482a-kube-api-access-dzv85\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p\" (UID: \"27df649b-2572-42d7-a137-6a82a01c482a\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p" Nov 22 10:54:32 crc kubenswrapper[4938]: E1122 10:54:32.027576 4938 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 22 10:54:32 crc kubenswrapper[4938]: E1122 10:54:32.027624 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7fc10dd9-2ded-4a21-badc-6e8bd9615dd1-cert podName:7fc10dd9-2ded-4a21-badc-6e8bd9615dd1 nodeName:}" failed. 
No retries permitted until 2025-11-22 10:54:32.527610743 +0000 UTC m=+1004.995448142 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7fc10dd9-2ded-4a21-badc-6e8bd9615dd1-cert") pod "openstack-operator-controller-manager-7f4bc68b84-bqqvf" (UID: "7fc10dd9-2ded-4a21-badc-6e8bd9615dd1") : secret "webhook-server-cert" not found Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.049131 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mg2n\" (UniqueName: \"kubernetes.io/projected/7fc10dd9-2ded-4a21-badc-6e8bd9615dd1-kube-api-access-9mg2n\") pod \"openstack-operator-controller-manager-7f4bc68b84-bqqvf\" (UID: \"7fc10dd9-2ded-4a21-badc-6e8bd9615dd1\") " pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.056871 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzv85\" (UniqueName: \"kubernetes.io/projected/27df649b-2572-42d7-a137-6a82a01c482a-kube-api-access-dzv85\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p\" (UID: \"27df649b-2572-42d7-a137-6a82a01c482a\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.065344 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.103690 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj"] Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.104077 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr" Nov 22 10:54:32 crc kubenswrapper[4938]: W1122 10:54:32.123757 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2415b98c_1a50_4f8d_b094_de51a90a0088.slice/crio-68bd889b2f60e6645ce60ceb64cc581da5ab672274c8a1a7d535d0a53a692bcb WatchSource:0}: Error finding container 68bd889b2f60e6645ce60ceb64cc581da5ab672274c8a1a7d535d0a53a692bcb: Status 404 returned error can't find the container with id 68bd889b2f60e6645ce60ceb64cc581da5ab672274c8a1a7d535d0a53a692bcb Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.177549 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.177566 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.256600 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9"] Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.283768 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc"] Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.290300 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr"] Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.302745 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b"] Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.340807 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e8f66c5-67cb-428e-bc4d-9e6e893af682-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-2l2nm\" (UID: \"7e8f66c5-67cb-428e-bc4d-9e6e893af682\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.358079 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7e8f66c5-67cb-428e-bc4d-9e6e893af682-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-2l2nm\" (UID: \"7e8f66c5-67cb-428e-bc4d-9e6e893af682\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.376427 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt"] Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.384480 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc"] Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.419365 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9" event={"ID":"dde3bedc-34b4-41e0-adba-78c802591de5","Type":"ContainerStarted","Data":"290a784e00c2d49e8bce3a124c0f3ba84a093ce02e7e3e2747cd4b704a84b636"} Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.427623 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj" event={"ID":"2415b98c-1a50-4f8d-b094-de51a90a0088","Type":"ContainerStarted","Data":"68bd889b2f60e6645ce60ceb64cc581da5ab672274c8a1a7d535d0a53a692bcb"} Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.499957 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-tb846"] Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.546959 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7fc10dd9-2ded-4a21-badc-6e8bd9615dd1-cert\") pod \"openstack-operator-controller-manager-7f4bc68b84-bqqvf\" (UID: \"7fc10dd9-2ded-4a21-badc-6e8bd9615dd1\") " pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 
10:54:32.556449 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7fc10dd9-2ded-4a21-badc-6e8bd9615dd1-cert\") pod \"openstack-operator-controller-manager-7f4bc68b84-bqqvf\" (UID: \"7fc10dd9-2ded-4a21-badc-6e8bd9615dd1\") " pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.565336 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.653460 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q"] Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.666856 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw"] Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.747519 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.752789 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq"] Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.775283 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv"] Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.779553 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n"] Nov 22 10:54:32 crc kubenswrapper[4938]: W1122 10:54:32.816003 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podca599052_ab51_498f_882d_895854e272c4.slice/crio-0e6fea1a7f202c0aeee8b352e7948ad90e2160b41fd23e62d558f7bebd28c57d WatchSource:0}: Error finding container 0e6fea1a7f202c0aeee8b352e7948ad90e2160b41fd23e62d558f7bebd28c57d: Status 404 returned error can't find the container with id 0e6fea1a7f202c0aeee8b352e7948ad90e2160b41fd23e62d558f7bebd28c57d Nov 22 10:54:32 crc kubenswrapper[4938]: W1122 10:54:32.817685 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1016b115_4617_4a19_a992_91dd5b124c9b.slice/crio-d8e59c1ab90073a7a1340701cd042d87d3bec980c46feb23ba759ece9a7b9f32 WatchSource:0}: Error finding container d8e59c1ab90073a7a1340701cd042d87d3bec980c46feb23ba759ece9a7b9f32: Status 404 returned error can't find the container with id d8e59c1ab90073a7a1340701cd042d87d3bec980c46feb23ba759ece9a7b9f32 Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.907614 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww"] Nov 22 10:54:32 crc kubenswrapper[4938]: W1122 10:54:32.918605 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc933df24_871e_4075_b48f_f8903914716b.slice/crio-822a145a69572294d94125980b14833921d934045d85ef4fb12996fb816bb19a WatchSource:0}: Error finding container 822a145a69572294d94125980b14833921d934045d85ef4fb12996fb816bb19a: Status 404 returned error can't find the container with id 
822a145a69572294d94125980b14833921d934045d85ef4fb12996fb816bb19a Nov 22 10:54:32 crc kubenswrapper[4938]: E1122 10:54:32.921681 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:2c4fe20e044dd8ea1f60f2f3f5e3844d932b4b79439835bd8771c73f16b38312,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-m8r6q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-64d7c556cd-qq5ww_openstack-operators(c933df24-871e-4075-b48f-f8903914716b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:32 crc kubenswrapper[4938]: I1122 10:54:32.931683 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-98nqr"] Nov 22 10:54:32 crc kubenswrapper[4938]: W1122 10:54:32.947258 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod454affdc_b63c_4696_914f_f2abbf7896ca.slice/crio-3db69aebb8dd1ab653da42c7a46187fc77c61642a9757e42e416e301788f3994 WatchSource:0}: Error finding container 3db69aebb8dd1ab653da42c7a46187fc77c61642a9757e42e416e301788f3994: Status 404 returned error can't find the container with id 3db69aebb8dd1ab653da42c7a46187fc77c61642a9757e42e416e301788f3994 Nov 22 10:54:32 crc kubenswrapper[4938]: E1122 10:54:32.950143 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-r7zqw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-867d87977b-98nqr_openstack-operators(454affdc-b63c-4696-914f-f2abbf7896ca): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.012151 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr"] Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.017012 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww"] Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.032228 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-695797c565-v457z"] Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.038480 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9"] Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.051979 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:624b77b1b44f5e72a6c7d5910b04eb8070c499f83dcf364fb9dc5f2f8cb83c85,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 
--leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ndddv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-77db6bf9c-s5pww_openstack-operators(584df814-c2c1-4566-a8d0-930b14020095): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.052331 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:f076b8d9e85881d9c3cb5272b13db7f5e05d2e9da884c17b677a844112831907,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xhm4m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-8f6687c44-5r8n9_openstack-operators(6ceba7c3-c04c-4449-9788-ed341bdaceb7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.054932 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:78d91c3cdd5eda41c2cd6d4a8491844e161dc33f6221be8cb822b2107d7ff46f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ccfb4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-695797c565-v457z_openstack-operators(dbade39a-90d4-49d8-96cc-0a5175783ac1): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.055903 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm"] Nov 22 10:54:33 crc kubenswrapper[4938]: W1122 10:54:33.061641 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e8f66c5_67cb_428e_bc4d_9e6e893af682.slice/crio-5d236e0af60b77e43fbb44f0a97cb50900b0e6af606cd9fca78f2488e02237a3 WatchSource:0}: Error finding container 5d236e0af60b77e43fbb44f0a97cb50900b0e6af606cd9fca78f2488e02237a3: Status 404 returned error can't find the container with id 5d236e0af60b77e43fbb44f0a97cb50900b0e6af606cd9fca78f2488e02237a3 Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.076906 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 
--leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:
quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IM
AGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstac
k-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:qu
ay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9xvbl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-77868f484-2l2nm_openstack-operators(7e8f66c5-67cb-428e-bc4d-9e6e893af682): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.154199 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p"] Nov 22 10:54:33 crc kubenswrapper[4938]: W1122 10:54:33.159078 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod27df649b_2572_42d7_a137_6a82a01c482a.slice/crio-f9cfa1ee910fae950c06643e77fa16b8fc6263313170aea1b8b96cd3cc769caf WatchSource:0}: Error finding container f9cfa1ee910fae950c06643e77fa16b8fc6263313170aea1b8b96cd3cc769caf: Status 404 returned error can't find the container with id f9cfa1ee910fae950c06643e77fa16b8fc6263313170aea1b8b96cd3cc769caf Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.161867 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dzv85,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p_openstack-operators(27df649b-2572-42d7-a137-6a82a01c482a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.163245 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p" podUID="27df649b-2572-42d7-a137-6a82a01c482a" Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.164010 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz"] Nov 22 10:54:33 crc kubenswrapper[4938]: W1122 10:54:33.175323 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9cd95fb2_a07b_4e6d_b9ed_6796ee31ee95.slice/crio-9dff4d9334704f923d349c91827bdf60235860f9f31e95c67a20d6e43e22012a WatchSource:0}: Error finding container 9dff4d9334704f923d349c91827bdf60235860f9f31e95c67a20d6e43e22012a: Status 404 returned error can't find the container with id 9dff4d9334704f923d349c91827bdf60235860f9f31e95c67a20d6e43e22012a Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.177647 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:e1a731922a2da70b224ce5396602a07cec2b4a79efe7bcdc17c5e4509d16b5e4,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 
--leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hxpjl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-6c55d8d69b-z8ksz_openstack-operators(9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.286557 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" podUID="454affdc-b63c-4696-914f-f2abbf7896ca" Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.326505 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" podUID="584df814-c2c1-4566-a8d0-930b14020095" Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.333016 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf"] Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.341831 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" podUID="6ceba7c3-c04c-4449-9788-ed341bdaceb7" Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.342247 4938 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" podUID="dbade39a-90d4-49d8-96cc-0a5175783ac1" Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.342357 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" podUID="c933df24-871e-4075-b48f-f8903914716b" Nov 22 10:54:33 crc kubenswrapper[4938]: W1122 10:54:33.342901 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7fc10dd9_2ded_4a21_badc_6e8bd9615dd1.slice/crio-a190e2679bfa15fa029dedaa244ce13c131b88ae4e154f5fe8fdf3788b686b5c WatchSource:0}: Error finding container a190e2679bfa15fa029dedaa244ce13c131b88ae4e154f5fe8fdf3788b686b5c: Status 404 returned error can't find the container with id a190e2679bfa15fa029dedaa244ce13c131b88ae4e154f5fe8fdf3788b686b5c Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.359842 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" podUID="7e8f66c5-67cb-428e-bc4d-9e6e893af682" Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.395735 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" podUID="9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95" Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.448623 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc" event={"ID":"cba844df-58bc-4d1e-989c-9eb4ccb036b6","Type":"ContainerStarted","Data":"53360843f96a3f11bbda53df75ccec681f00c320e5e72f23241ff0228872cdeb"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.450857 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" event={"ID":"6ceba7c3-c04c-4449-9788-ed341bdaceb7","Type":"ContainerStarted","Data":"3ef1747d64d4f6302d8cd3640c4fea481784d0e96e1db2e5f29be4801b8b55d0"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.450883 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" event={"ID":"6ceba7c3-c04c-4449-9788-ed341bdaceb7","Type":"ContainerStarted","Data":"197210221fb9dd1c53bf33d6b3765266b9c81c7fc565b516831a2975def2fa62"} Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.452351 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:f076b8d9e85881d9c3cb5272b13db7f5e05d2e9da884c17b677a844112831907\\\"\"" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" podUID="6ceba7c3-c04c-4449-9788-ed341bdaceb7" Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.466596 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" 
event={"ID":"c933df24-871e-4075-b48f-f8903914716b","Type":"ContainerStarted","Data":"870f34e1bc5362914cce160c2b566643821e151d0d51c62407bca83315caa2c0"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.466641 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" event={"ID":"c933df24-871e-4075-b48f-f8903914716b","Type":"ContainerStarted","Data":"822a145a69572294d94125980b14833921d934045d85ef4fb12996fb816bb19a"} Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.471219 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:2c4fe20e044dd8ea1f60f2f3f5e3844d932b4b79439835bd8771c73f16b38312\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" podUID="c933df24-871e-4075-b48f-f8903914716b" Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.484117 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr" event={"ID":"8659e26c-11d1-4a24-82e3-42e9737a54b8","Type":"ContainerStarted","Data":"1be1189a5619e7ce7598d49ffd6a5f5f65d18ff183230a4e7adac6d90e07c9d8"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.485222 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt" event={"ID":"65118a1f-ed5e-4354-8494-4df42ff6ae6a","Type":"ContainerStarted","Data":"fc57a5e17100bac467e64c053ee77d8070b06f250b11f4f51f6d2b8eacad3011"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.486017 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-tb846" event={"ID":"2a30b9b0-97ac-4268-8d85-193fa80c6b01","Type":"ContainerStarted","Data":"e4355314acb560a25b9d2e430e013a5459f53593fda44f753082da3b1ded103e"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.495201 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" event={"ID":"7e8f66c5-67cb-428e-bc4d-9e6e893af682","Type":"ContainerStarted","Data":"6ae029aefa608f779dd727d13ac5d5ba284bbb7d5cbc98bb288dc9c18975dbab"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.495253 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" event={"ID":"7e8f66c5-67cb-428e-bc4d-9e6e893af682","Type":"ContainerStarted","Data":"5d236e0af60b77e43fbb44f0a97cb50900b0e6af606cd9fca78f2488e02237a3"} Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.498479 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" podUID="7e8f66c5-67cb-428e-bc4d-9e6e893af682" Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.503697 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p" 
event={"ID":"27df649b-2572-42d7-a137-6a82a01c482a","Type":"ContainerStarted","Data":"f9cfa1ee910fae950c06643e77fa16b8fc6263313170aea1b8b96cd3cc769caf"} Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.505825 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p" podUID="27df649b-2572-42d7-a137-6a82a01c482a" Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.506127 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr" event={"ID":"98d39c17-a9b0-483d-b170-eb006b5ee4b9","Type":"ContainerStarted","Data":"fbe7bdf95c1bef3959f2b314614b4804061477b13715c61f831105a6d5c08091"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.507002 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq" event={"ID":"ee7a691c-6232-4e30-b1bf-400c65b8b127","Type":"ContainerStarted","Data":"72ac728423d61e4833b560efd63f7defd3745265691b6750c80da5740f44b02d"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.508184 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n" event={"ID":"1016b115-4617-4a19-a992-91dd5b124c9b","Type":"ContainerStarted","Data":"d8e59c1ab90073a7a1340701cd042d87d3bec980c46feb23ba759ece9a7b9f32"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.510844 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" event={"ID":"dbade39a-90d4-49d8-96cc-0a5175783ac1","Type":"ContainerStarted","Data":"b745db8554cf2c3e80ff1cff871c13ff8df574f031295d158e9be8057e49fcb2"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.510930 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" event={"ID":"dbade39a-90d4-49d8-96cc-0a5175783ac1","Type":"ContainerStarted","Data":"c555dd25ea3eb98522700dda9a8548258aeb7b4618b9164c115ccb2af14645e5"} Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.512787 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:78d91c3cdd5eda41c2cd6d4a8491844e161dc33f6221be8cb822b2107d7ff46f\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" podUID="dbade39a-90d4-49d8-96cc-0a5175783ac1" Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.513459 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw" event={"ID":"fd298c00-9118-413b-bce4-1198393538fa","Type":"ContainerStarted","Data":"2ef1d4d4e598287ef569eed532570d302eaba6d5fcfbec493d60bdbab0884813"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.515110 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" event={"ID":"7fc10dd9-2ded-4a21-badc-6e8bd9615dd1","Type":"ContainerStarted","Data":"a190e2679bfa15fa029dedaa244ce13c131b88ae4e154f5fe8fdf3788b686b5c"} 
Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.554161 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" event={"ID":"9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95","Type":"ContainerStarted","Data":"ed8a3dc93a20c48dc7f1431d7e3a36524de38ffa5f4970e9a2fb509e6268366e"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.554401 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" event={"ID":"9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95","Type":"ContainerStarted","Data":"9dff4d9334704f923d349c91827bdf60235860f9f31e95c67a20d6e43e22012a"} Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.568225 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:e1a731922a2da70b224ce5396602a07cec2b4a79efe7bcdc17c5e4509d16b5e4\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" podUID="9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95" Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.582810 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv" event={"ID":"ca599052-ab51-498f-882d-895854e272c4","Type":"ContainerStarted","Data":"0e6fea1a7f202c0aeee8b352e7948ad90e2160b41fd23e62d558f7bebd28c57d"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.604728 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q" event={"ID":"46844239-10fa-433c-bd82-565bf911989c","Type":"ContainerStarted","Data":"001f2302fe2b92bea502cb30bd1816464821f49d11b192ec7d2b5050b604ec0f"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.613439 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc" event={"ID":"496d9a6a-3979-43af-aa47-9161506bc8e9","Type":"ContainerStarted","Data":"5621eeb94c4491c708dfd280f01cdd2982e950e49a1e3da19b5c0e01aeaff55f"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.615810 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" event={"ID":"584df814-c2c1-4566-a8d0-930b14020095","Type":"ContainerStarted","Data":"ab7abffb31602d440caafeb4c80654a40bab71fb17e90c5c72cef5bbd5b7076a"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.615841 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" event={"ID":"584df814-c2c1-4566-a8d0-930b14020095","Type":"ContainerStarted","Data":"f00bcb9e5eeab3079b85995e888f31fc383e48ea502f0c8892aead1a3f2aa842"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.625028 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b" event={"ID":"a8e3c6f8-4a77-4180-a67b-3dab37169c07","Type":"ContainerStarted","Data":"669f99504240d0b02e090c512876113f478f77ea05dd379564a4340cdafcd845"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.626261 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" 
event={"ID":"454affdc-b63c-4696-914f-f2abbf7896ca","Type":"ContainerStarted","Data":"9128ce147a5b338caa3c7a281f9558d8970130216d1f4c5d0018ce39d2830dcd"} Nov 22 10:54:33 crc kubenswrapper[4938]: I1122 10:54:33.626293 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" event={"ID":"454affdc-b63c-4696-914f-f2abbf7896ca","Type":"ContainerStarted","Data":"3db69aebb8dd1ab653da42c7a46187fc77c61642a9757e42e416e301788f3994"} Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.648129 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" podUID="454affdc-b63c-4696-914f-f2abbf7896ca" Nov 22 10:54:33 crc kubenswrapper[4938]: E1122 10:54:33.648453 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:624b77b1b44f5e72a6c7d5910b04eb8070c499f83dcf364fb9dc5f2f8cb83c85\\\"\"" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" podUID="584df814-c2c1-4566-a8d0-930b14020095" Nov 22 10:54:34 crc kubenswrapper[4938]: I1122 10:54:34.637134 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" event={"ID":"7fc10dd9-2ded-4a21-badc-6e8bd9615dd1","Type":"ContainerStarted","Data":"1c4da749d54dd34faec1ef3c3ce19fa07ba975ddc57e1cd223e2c934675a1d9b"} Nov 22 10:54:34 crc kubenswrapper[4938]: I1122 10:54:34.637200 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" event={"ID":"7fc10dd9-2ded-4a21-badc-6e8bd9615dd1","Type":"ContainerStarted","Data":"57a505c4420dcd6fa0e479cdf55b9a60e81f8ee98f466db40da26c9eb2824bbf"} Nov 22 10:54:34 crc kubenswrapper[4938]: E1122 10:54:34.638354 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:2c4fe20e044dd8ea1f60f2f3f5e3844d932b4b79439835bd8771c73f16b38312\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" podUID="c933df24-871e-4075-b48f-f8903914716b" Nov 22 10:54:34 crc kubenswrapper[4938]: E1122 10:54:34.640470 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:f076b8d9e85881d9c3cb5272b13db7f5e05d2e9da884c17b677a844112831907\\\"\"" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" podUID="6ceba7c3-c04c-4449-9788-ed341bdaceb7" Nov 22 10:54:34 crc kubenswrapper[4938]: E1122 10:54:34.640637 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" 
podUID="454affdc-b63c-4696-914f-f2abbf7896ca" Nov 22 10:54:34 crc kubenswrapper[4938]: E1122 10:54:34.640660 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p" podUID="27df649b-2572-42d7-a137-6a82a01c482a" Nov 22 10:54:34 crc kubenswrapper[4938]: E1122 10:54:34.640678 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:78d91c3cdd5eda41c2cd6d4a8491844e161dc33f6221be8cb822b2107d7ff46f\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" podUID="dbade39a-90d4-49d8-96cc-0a5175783ac1" Nov 22 10:54:34 crc kubenswrapper[4938]: E1122 10:54:34.640954 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:e1a731922a2da70b224ce5396602a07cec2b4a79efe7bcdc17c5e4509d16b5e4\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" podUID="9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95" Nov 22 10:54:34 crc kubenswrapper[4938]: E1122 10:54:34.641136 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" podUID="7e8f66c5-67cb-428e-bc4d-9e6e893af682" Nov 22 10:54:34 crc kubenswrapper[4938]: E1122 10:54:34.641417 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:624b77b1b44f5e72a6c7d5910b04eb8070c499f83dcf364fb9dc5f2f8cb83c85\\\"\"" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" podUID="584df814-c2c1-4566-a8d0-930b14020095" Nov 22 10:54:34 crc kubenswrapper[4938]: I1122 10:54:34.712762 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" podStartSLOduration=3.712743377 podStartE2EDuration="3.712743377s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:54:34.711215958 +0000 UTC m=+1007.179053357" watchObservedRunningTime="2025-11-22 10:54:34.712743377 +0000 UTC m=+1007.180580776" Nov 22 10:54:35 crc kubenswrapper[4938]: I1122 10:54:35.642502 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.702951 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b" 
event={"ID":"a8e3c6f8-4a77-4180-a67b-3dab37169c07","Type":"ContainerStarted","Data":"9ced18b1a213b31db3d0363fe4194c0a748b9dea7473d91f6d4fb2f8c59b9d30"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.713709 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj" event={"ID":"2415b98c-1a50-4f8d-b094-de51a90a0088","Type":"ContainerStarted","Data":"f78bfbb213ad0981baa5d9fdd8451001c278f961e30e46c525179e89d998b0f9"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.723351 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc" event={"ID":"496d9a6a-3979-43af-aa47-9161506bc8e9","Type":"ContainerStarted","Data":"b6d6cad7c4748beab6ddf8e1202be8b507143d5842a23a4a365a585b8ab21932"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.744106 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr" event={"ID":"98d39c17-a9b0-483d-b170-eb006b5ee4b9","Type":"ContainerStarted","Data":"b25072d46af1772ceeae3cfa06dcae3fbc0136102415b08dad1156ed2effa4cd"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.754628 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv" event={"ID":"ca599052-ab51-498f-882d-895854e272c4","Type":"ContainerStarted","Data":"41c254691cce814225328b28da1c342aa4ca1c89844c4e478cfe48e8b414f340"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.754682 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv" event={"ID":"ca599052-ab51-498f-882d-895854e272c4","Type":"ContainerStarted","Data":"58c81aecb760954e84091dcd8af6d29f9a5e08780e8d62888db8c6d0aeffd434"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.754873 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv" Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.756863 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc" event={"ID":"cba844df-58bc-4d1e-989c-9eb4ccb036b6","Type":"ContainerStarted","Data":"3fb6d35eb936001494205da6f085c87b7399d9c876ffc3aeb909bc1eee0e4343"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.758995 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq" event={"ID":"ee7a691c-6232-4e30-b1bf-400c65b8b127","Type":"ContainerStarted","Data":"a3813ea9634018c295563e8e6857692c56bdeb93e4a452a8182090bde1847ec7"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.762524 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7f4bc68b84-bqqvf" Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.770265 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt" event={"ID":"65118a1f-ed5e-4354-8494-4df42ff6ae6a","Type":"ContainerStarted","Data":"ed53c8fc50080bd29d99046e50977f24f5410939ae986d01ba8509885cc33990"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.780197 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n" 
event={"ID":"1016b115-4617-4a19-a992-91dd5b124c9b","Type":"ContainerStarted","Data":"786456e97db678e5d3a28e1db74e9a7528ef34921cbaebf3b2e1740f194ac7e5"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.790748 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw" event={"ID":"fd298c00-9118-413b-bce4-1198393538fa","Type":"ContainerStarted","Data":"c367b806e555c8c62754ebfc2b8828bbb72714eebebb33faff6d024b51fa9b70"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.795585 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv" podStartSLOduration=2.928239336 podStartE2EDuration="11.795552894s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.821987565 +0000 UTC m=+1005.289824964" lastFinishedPulling="2025-11-22 10:54:41.689301123 +0000 UTC m=+1014.157138522" observedRunningTime="2025-11-22 10:54:42.790938917 +0000 UTC m=+1015.258776316" watchObservedRunningTime="2025-11-22 10:54:42.795552894 +0000 UTC m=+1015.263390293" Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.797292 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr" event={"ID":"8659e26c-11d1-4a24-82e3-42e9737a54b8","Type":"ContainerStarted","Data":"9b096fbdea6ec7f7e4d932db09595e55ae45c4062d94d577d96b001716a8a627"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.800814 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-tb846" event={"ID":"2a30b9b0-97ac-4268-8d85-193fa80c6b01","Type":"ContainerStarted","Data":"59d2d0f1911855b359cecf6285b70f56554a7fc51aa0ff7feff1dfda503a9b9e"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.800861 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-tb846" event={"ID":"2a30b9b0-97ac-4268-8d85-193fa80c6b01","Type":"ContainerStarted","Data":"3393f838172965ef4a44d967dc6c963cce1ec2c0e403e9ba38789c0704b90e6d"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.801540 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-54485f899-tb846" Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.803261 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9" event={"ID":"dde3bedc-34b4-41e0-adba-78c802591de5","Type":"ContainerStarted","Data":"172a48900f5758bfeb7302efac3365cbd30079aa974d54e05d36b31a85181428"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.803303 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9" event={"ID":"dde3bedc-34b4-41e0-adba-78c802591de5","Type":"ContainerStarted","Data":"bd0bc000058f181e811de453abf0b481bb2cff42ae78edbab39699fe2bdcfd7d"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.803497 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9" Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.815103 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q" 
event={"ID":"46844239-10fa-433c-bd82-565bf911989c","Type":"ContainerStarted","Data":"4ab2fd9caf7864fd81849a667ecb45cbd6dcc33bed49e55585645539f3468df7"} Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.882406 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-54485f899-tb846" podStartSLOduration=2.693185057 podStartE2EDuration="11.882388499s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.510218508 +0000 UTC m=+1004.978055907" lastFinishedPulling="2025-11-22 10:54:41.69942196 +0000 UTC m=+1014.167259349" observedRunningTime="2025-11-22 10:54:42.872325744 +0000 UTC m=+1015.340163143" watchObservedRunningTime="2025-11-22 10:54:42.882388499 +0000 UTC m=+1015.350225898" Nov 22 10:54:42 crc kubenswrapper[4938]: I1122 10:54:42.932675 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9" podStartSLOduration=3.567902046 podStartE2EDuration="12.932653075s" podCreationTimestamp="2025-11-22 10:54:30 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.361875731 +0000 UTC m=+1004.829713130" lastFinishedPulling="2025-11-22 10:54:41.72662676 +0000 UTC m=+1014.194464159" observedRunningTime="2025-11-22 10:54:42.907686652 +0000 UTC m=+1015.375524071" watchObservedRunningTime="2025-11-22 10:54:42.932653075 +0000 UTC m=+1015.400490474" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.822551 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq" event={"ID":"ee7a691c-6232-4e30-b1bf-400c65b8b127","Type":"ContainerStarted","Data":"8bd4f54b57c7a837ae764cf626e7e1805568fb4b6bced49b6c8b79b9357c25c4"} Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.822699 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.824478 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt" event={"ID":"65118a1f-ed5e-4354-8494-4df42ff6ae6a","Type":"ContainerStarted","Data":"bb0d8ccf067a50643fe9aa1b2dee98043081186095fd3c283fd45ee4c058d49a"} Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.824952 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.827125 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q" event={"ID":"46844239-10fa-433c-bd82-565bf911989c","Type":"ContainerStarted","Data":"83c5829b6b6740a1ad855136a32171d95fcb1ac7e9a798a0e91633e6edef22e7"} Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.827610 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.829439 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n" event={"ID":"1016b115-4617-4a19-a992-91dd5b124c9b","Type":"ContainerStarted","Data":"67f4ce680305545f9ae15011b7e1db6a656314ff58fd387197ad3c9297c59955"} Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.829944 
4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.832617 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc" event={"ID":"cba844df-58bc-4d1e-989c-9eb4ccb036b6","Type":"ContainerStarted","Data":"cbdf0271903553c0634dbf166cf5664896acf87ba3336c51a44e8fa0bf06a880"} Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.832697 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.834655 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr" event={"ID":"98d39c17-a9b0-483d-b170-eb006b5ee4b9","Type":"ContainerStarted","Data":"f58eddf7e02290b6511da122d40d43a30103e99dfdf6dfe81ceaa1d37efa4100"} Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.834802 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.836286 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr" event={"ID":"8659e26c-11d1-4a24-82e3-42e9737a54b8","Type":"ContainerStarted","Data":"806f070fbffc925347adab54fd0384b2ea7e6204b8f159dba5c392ec24220b14"} Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.836495 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.840631 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj" event={"ID":"2415b98c-1a50-4f8d-b094-de51a90a0088","Type":"ContainerStarted","Data":"069302e2a1070c259d7e76d4aaf0babc26dbfba1f17deee34b2ec77dc8a1ed00"} Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.840781 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.842423 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc" event={"ID":"496d9a6a-3979-43af-aa47-9161506bc8e9","Type":"ContainerStarted","Data":"e60484d8a0b25a0c7c1e9f7cea1d7525b9eb1ed374cd2b87fcd9a9efbf6bf2c0"} Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.842477 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.844059 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw" event={"ID":"fd298c00-9118-413b-bce4-1198393538fa","Type":"ContainerStarted","Data":"ad418cbde42221dc0ea54d9c929285148154120f96a9cd0cf76319d0e2fd14b0"} Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.844704 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 
10:54:43.845257 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq" podStartSLOduration=3.92868758 podStartE2EDuration="12.845246308s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.809609771 +0000 UTC m=+1005.277447170" lastFinishedPulling="2025-11-22 10:54:41.726168499 +0000 UTC m=+1014.194005898" observedRunningTime="2025-11-22 10:54:43.843206356 +0000 UTC m=+1016.311043755" watchObservedRunningTime="2025-11-22 10:54:43.845246308 +0000 UTC m=+1016.313083697" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.848693 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b" event={"ID":"a8e3c6f8-4a77-4180-a67b-3dab37169c07","Type":"ContainerStarted","Data":"96adab2906e6438261318d393834cad990a885867f0733f576b85d50f561d97b"} Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.848723 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.876321 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc" podStartSLOduration=3.6145979539999997 podStartE2EDuration="12.876305127s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.465643466 +0000 UTC m=+1004.933480865" lastFinishedPulling="2025-11-22 10:54:41.727350639 +0000 UTC m=+1014.195188038" observedRunningTime="2025-11-22 10:54:43.871745971 +0000 UTC m=+1016.339583380" watchObservedRunningTime="2025-11-22 10:54:43.876305127 +0000 UTC m=+1016.344142526" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.893231 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc" podStartSLOduration=4.555225556 podStartE2EDuration="13.893212976s" podCreationTimestamp="2025-11-22 10:54:30 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.377075257 +0000 UTC m=+1004.844912656" lastFinishedPulling="2025-11-22 10:54:41.715062677 +0000 UTC m=+1014.182900076" observedRunningTime="2025-11-22 10:54:43.891545234 +0000 UTC m=+1016.359382633" watchObservedRunningTime="2025-11-22 10:54:43.893212976 +0000 UTC m=+1016.361050375" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.910081 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n" podStartSLOduration=4.001481489 podStartE2EDuration="12.910065604s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.822486338 +0000 UTC m=+1005.290323737" lastFinishedPulling="2025-11-22 10:54:41.731070453 +0000 UTC m=+1014.198907852" observedRunningTime="2025-11-22 10:54:43.9051834 +0000 UTC m=+1016.373020799" watchObservedRunningTime="2025-11-22 10:54:43.910065604 +0000 UTC m=+1016.377903003" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.952020 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj" podStartSLOduration=4.440461812 podStartE2EDuration="13.952001239s" podCreationTimestamp="2025-11-22 10:54:30 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.214199591 +0000 UTC 
m=+1004.682036990" lastFinishedPulling="2025-11-22 10:54:41.725739018 +0000 UTC m=+1014.193576417" observedRunningTime="2025-11-22 10:54:43.951469146 +0000 UTC m=+1016.419306555" watchObservedRunningTime="2025-11-22 10:54:43.952001239 +0000 UTC m=+1016.419838628" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.955879 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr" podStartSLOduration=3.676410044 podStartE2EDuration="12.955865327s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.418680534 +0000 UTC m=+1004.886517933" lastFinishedPulling="2025-11-22 10:54:41.698135817 +0000 UTC m=+1014.165973216" observedRunningTime="2025-11-22 10:54:43.928745539 +0000 UTC m=+1016.396582938" watchObservedRunningTime="2025-11-22 10:54:43.955865327 +0000 UTC m=+1016.423702726" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.977189 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q" podStartSLOduration=3.891766613 podStartE2EDuration="12.977173968s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.675201878 +0000 UTC m=+1005.143039277" lastFinishedPulling="2025-11-22 10:54:41.760609233 +0000 UTC m=+1014.228446632" observedRunningTime="2025-11-22 10:54:43.97290435 +0000 UTC m=+1016.440741749" watchObservedRunningTime="2025-11-22 10:54:43.977173968 +0000 UTC m=+1016.445011367" Nov 22 10:54:43 crc kubenswrapper[4938]: I1122 10:54:43.991868 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt" podStartSLOduration=3.784848858 podStartE2EDuration="12.991853341s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.508862914 +0000 UTC m=+1004.976700323" lastFinishedPulling="2025-11-22 10:54:41.715867407 +0000 UTC m=+1014.183704806" observedRunningTime="2025-11-22 10:54:43.990998289 +0000 UTC m=+1016.458835688" watchObservedRunningTime="2025-11-22 10:54:43.991853341 +0000 UTC m=+1016.459690740" Nov 22 10:54:44 crc kubenswrapper[4938]: I1122 10:54:44.009258 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr" podStartSLOduration=4.315602055 podStartE2EDuration="13.009241003s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:33.03369628 +0000 UTC m=+1005.501533679" lastFinishedPulling="2025-11-22 10:54:41.727335228 +0000 UTC m=+1014.195172627" observedRunningTime="2025-11-22 10:54:44.007019546 +0000 UTC m=+1016.474856965" watchObservedRunningTime="2025-11-22 10:54:44.009241003 +0000 UTC m=+1016.477078402" Nov 22 10:54:44 crc kubenswrapper[4938]: I1122 10:54:44.026000 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw" podStartSLOduration=3.970906173 podStartE2EDuration="13.025980668s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.674760197 +0000 UTC m=+1005.142597596" lastFinishedPulling="2025-11-22 10:54:41.729834692 +0000 UTC m=+1014.197672091" observedRunningTime="2025-11-22 10:54:44.020418006 +0000 UTC m=+1016.488255425" watchObservedRunningTime="2025-11-22 10:54:44.025980668 +0000 UTC 
m=+1016.493818067" Nov 22 10:54:44 crc kubenswrapper[4938]: I1122 10:54:44.039081 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b" podStartSLOduration=3.771400017 podStartE2EDuration="13.03905971s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.438131768 +0000 UTC m=+1004.905969167" lastFinishedPulling="2025-11-22 10:54:41.705791461 +0000 UTC m=+1014.173628860" observedRunningTime="2025-11-22 10:54:44.038163097 +0000 UTC m=+1016.506000506" watchObservedRunningTime="2025-11-22 10:54:44.03905971 +0000 UTC m=+1016.506897109" Nov 22 10:54:48 crc kubenswrapper[4938]: I1122 10:54:48.888684 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p" event={"ID":"27df649b-2572-42d7-a137-6a82a01c482a","Type":"ContainerStarted","Data":"8f2e29079925ff3159b687ac9f022f29efd2f694d53070d3dc421762c1b9ab61"} Nov 22 10:54:48 crc kubenswrapper[4938]: I1122 10:54:48.891288 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" event={"ID":"7e8f66c5-67cb-428e-bc4d-9e6e893af682","Type":"ContainerStarted","Data":"3de82e8a1beb81f14939d2224a6605cdf752687427f2b2665844f5ae035eccfa"} Nov 22 10:54:48 crc kubenswrapper[4938]: I1122 10:54:48.891510 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" Nov 22 10:54:48 crc kubenswrapper[4938]: I1122 10:54:48.925565 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p" podStartSLOduration=2.830268917 podStartE2EDuration="17.925546163s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:33.16169188 +0000 UTC m=+1005.629529279" lastFinishedPulling="2025-11-22 10:54:48.256969126 +0000 UTC m=+1020.724806525" observedRunningTime="2025-11-22 10:54:48.902810366 +0000 UTC m=+1021.370647755" watchObservedRunningTime="2025-11-22 10:54:48.925546163 +0000 UTC m=+1021.393383562" Nov 22 10:54:48 crc kubenswrapper[4938]: I1122 10:54:48.941157 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" podStartSLOduration=2.808095534 podStartE2EDuration="17.941136979s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:33.076550938 +0000 UTC m=+1005.544388337" lastFinishedPulling="2025-11-22 10:54:48.209592383 +0000 UTC m=+1020.677429782" observedRunningTime="2025-11-22 10:54:48.934371287 +0000 UTC m=+1021.402208706" watchObservedRunningTime="2025-11-22 10:54:48.941136979 +0000 UTC m=+1021.408974378" Nov 22 10:54:50 crc kubenswrapper[4938]: I1122 10:54:50.908629 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" event={"ID":"6ceba7c3-c04c-4449-9788-ed341bdaceb7","Type":"ContainerStarted","Data":"d9a07e9257db1ed2412d1f2a70eab31088e496d19259d7ae40023731b9b30cdd"} Nov 22 10:54:50 crc kubenswrapper[4938]: I1122 10:54:50.909190 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" Nov 22 10:54:50 crc kubenswrapper[4938]: I1122 10:54:50.910369 
4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" event={"ID":"9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95","Type":"ContainerStarted","Data":"0c480aecc524dea0b85d84e9d40f3ee9a63a7c97b15299025b9d93d0142dca09"} Nov 22 10:54:50 crc kubenswrapper[4938]: I1122 10:54:50.910545 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" Nov 22 10:54:50 crc kubenswrapper[4938]: I1122 10:54:50.929418 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" podStartSLOduration=3.177805041 podStartE2EDuration="19.929379516s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:33.052176109 +0000 UTC m=+1005.520013508" lastFinishedPulling="2025-11-22 10:54:49.803750584 +0000 UTC m=+1022.271587983" observedRunningTime="2025-11-22 10:54:50.926567345 +0000 UTC m=+1023.394404744" watchObservedRunningTime="2025-11-22 10:54:50.929379516 +0000 UTC m=+1023.397216925" Nov 22 10:54:50 crc kubenswrapper[4938]: I1122 10:54:50.942406 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" podStartSLOduration=3.318807282 podStartE2EDuration="19.942385956s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:33.17744856 +0000 UTC m=+1005.645285959" lastFinishedPulling="2025-11-22 10:54:49.801027234 +0000 UTC m=+1022.268864633" observedRunningTime="2025-11-22 10:54:50.939306418 +0000 UTC m=+1023.407143837" watchObservedRunningTime="2025-11-22 10:54:50.942385956 +0000 UTC m=+1023.410223355" Nov 22 10:54:51 crc kubenswrapper[4938]: I1122 10:54:51.312132 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-mrpj9" Nov 22 10:54:51 crc kubenswrapper[4938]: I1122 10:54:51.326464 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-748967c98-jhgfc" Nov 22 10:54:51 crc kubenswrapper[4938]: I1122 10:54:51.376504 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-8b8mj" Nov 22 10:54:51 crc kubenswrapper[4938]: I1122 10:54:51.383524 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-6f95d84fd6-s2d7b" Nov 22 10:54:51 crc kubenswrapper[4938]: I1122 10:54:51.395493 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-j8ftc" Nov 22 10:54:51 crc kubenswrapper[4938]: I1122 10:54:51.465974 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-r9lgr" Nov 22 10:54:51 crc kubenswrapper[4938]: I1122 10:54:51.616941 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-54485f899-tb846" Nov 22 10:54:51 crc kubenswrapper[4938]: I1122 10:54:51.694217 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-gxvrt" Nov 22 10:54:51 crc 
kubenswrapper[4938]: I1122 10:54:51.711356 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-646fd589f9-gc4lw" Nov 22 10:54:51 crc kubenswrapper[4938]: I1122 10:54:51.742815 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-58879495c-xjw2q" Nov 22 10:54:51 crc kubenswrapper[4938]: I1122 10:54:51.771243 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-dzpvv" Nov 22 10:54:51 crc kubenswrapper[4938]: I1122 10:54:51.826740 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-qps9n" Nov 22 10:54:51 crc kubenswrapper[4938]: I1122 10:54:51.850248 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-dx2bq" Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 10:54:52.106580 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-4rmnr" Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 10:54:52.926076 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" event={"ID":"c933df24-871e-4075-b48f-f8903914716b","Type":"ContainerStarted","Data":"53efb0c4262c85e372017c78046f5093cd300115632089efaa19520721f331d7"} Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 10:54:52.926654 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 10:54:52.928017 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" event={"ID":"454affdc-b63c-4696-914f-f2abbf7896ca","Type":"ContainerStarted","Data":"653ec5ee9332d033f35bb366412fa18cc5cc94ea9273887fb538973b78441cd2"} Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 10:54:52.928179 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 10:54:52.931469 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" event={"ID":"dbade39a-90d4-49d8-96cc-0a5175783ac1","Type":"ContainerStarted","Data":"4dae84aa3423c98613a369bb3dd0b4c7385f1a9553d8fec7066c0f1c9e7a0ede"} Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 10:54:52.931636 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 10:54:52.933809 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" event={"ID":"584df814-c2c1-4566-a8d0-930b14020095","Type":"ContainerStarted","Data":"0d015d2a2e1a4012ec774dcb7f36931fe8d3bf2d9a1126b11bc07c9d7a197d9f"} Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 10:54:52.934060 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 
10:54:52.947989 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" podStartSLOduration=2.535063803 podStartE2EDuration="21.947964035s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.920987499 +0000 UTC m=+1005.388824898" lastFinishedPulling="2025-11-22 10:54:52.333887731 +0000 UTC m=+1024.801725130" observedRunningTime="2025-11-22 10:54:52.946458346 +0000 UTC m=+1025.414295745" watchObservedRunningTime="2025-11-22 10:54:52.947964035 +0000 UTC m=+1025.415801434" Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 10:54:52.962628 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" podStartSLOduration=2.497001365 podStartE2EDuration="21.962610717s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:32.950024685 +0000 UTC m=+1005.417862074" lastFinishedPulling="2025-11-22 10:54:52.415634027 +0000 UTC m=+1024.883471426" observedRunningTime="2025-11-22 10:54:52.961297313 +0000 UTC m=+1025.429134712" watchObservedRunningTime="2025-11-22 10:54:52.962610717 +0000 UTC m=+1025.430448116" Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 10:54:52.977378 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" podStartSLOduration=2.649176589 podStartE2EDuration="21.977355361s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:33.0518003 +0000 UTC m=+1005.519637699" lastFinishedPulling="2025-11-22 10:54:52.379979072 +0000 UTC m=+1024.847816471" observedRunningTime="2025-11-22 10:54:52.975023042 +0000 UTC m=+1025.442860441" watchObservedRunningTime="2025-11-22 10:54:52.977355361 +0000 UTC m=+1025.445192760" Nov 22 10:54:52 crc kubenswrapper[4938]: I1122 10:54:52.992194 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" podStartSLOduration=2.701412935 podStartE2EDuration="21.992177547s" podCreationTimestamp="2025-11-22 10:54:31 +0000 UTC" firstStartedPulling="2025-11-22 10:54:33.054776595 +0000 UTC m=+1005.522613994" lastFinishedPulling="2025-11-22 10:54:52.345541207 +0000 UTC m=+1024.813378606" observedRunningTime="2025-11-22 10:54:52.99029642 +0000 UTC m=+1025.458133829" watchObservedRunningTime="2025-11-22 10:54:52.992177547 +0000 UTC m=+1025.460014946" Nov 22 10:55:01 crc kubenswrapper[4938]: I1122 10:55:01.742505 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-qq5ww" Nov 22 10:55:01 crc kubenswrapper[4938]: I1122 10:55:01.864455 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-867d87977b-98nqr" Nov 22 10:55:01 crc kubenswrapper[4938]: I1122 10:55:01.953236 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5r8n9" Nov 22 10:55:02 crc kubenswrapper[4938]: I1122 10:55:02.019161 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-v457z" Nov 22 10:55:02 crc kubenswrapper[4938]: I1122 10:55:02.068710 4938 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-77db6bf9c-s5pww" Nov 22 10:55:02 crc kubenswrapper[4938]: I1122 10:55:02.182407 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-z8ksz" Nov 22 10:55:02 crc kubenswrapper[4938]: I1122 10:55:02.576162 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-2l2nm" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.310538 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-bphzz"] Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.314173 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.316968 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.321338 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.321604 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-w6xb9" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.321694 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.326965 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-bphzz"] Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.388917 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-g5bhp"] Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.390172 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.396636 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.406640 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-g5bhp"] Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.444155 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07683aaf-0d62-4748-8c02-f6e04a71e4f2-config\") pod \"dnsmasq-dns-675f4bcbfc-bphzz\" (UID: \"07683aaf-0d62-4748-8c02-f6e04a71e4f2\") " pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.444216 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpfkb\" (UniqueName: \"kubernetes.io/projected/07683aaf-0d62-4748-8c02-f6e04a71e4f2-kube-api-access-gpfkb\") pod \"dnsmasq-dns-675f4bcbfc-bphzz\" (UID: \"07683aaf-0d62-4748-8c02-f6e04a71e4f2\") " pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.545358 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07683aaf-0d62-4748-8c02-f6e04a71e4f2-config\") pod \"dnsmasq-dns-675f4bcbfc-bphzz\" (UID: \"07683aaf-0d62-4748-8c02-f6e04a71e4f2\") " pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.545442 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpfkb\" (UniqueName: \"kubernetes.io/projected/07683aaf-0d62-4748-8c02-f6e04a71e4f2-kube-api-access-gpfkb\") pod \"dnsmasq-dns-675f4bcbfc-bphzz\" (UID: \"07683aaf-0d62-4748-8c02-f6e04a71e4f2\") " pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.545477 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-g5bhp\" (UID: \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.545522 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-config\") pod \"dnsmasq-dns-78dd6ddcc-g5bhp\" (UID: \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.545548 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbgzc\" (UniqueName: \"kubernetes.io/projected/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-kube-api-access-mbgzc\") pod \"dnsmasq-dns-78dd6ddcc-g5bhp\" (UID: \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.546534 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07683aaf-0d62-4748-8c02-f6e04a71e4f2-config\") pod \"dnsmasq-dns-675f4bcbfc-bphzz\" (UID: \"07683aaf-0d62-4748-8c02-f6e04a71e4f2\") " pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" Nov 22 
10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.567663 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpfkb\" (UniqueName: \"kubernetes.io/projected/07683aaf-0d62-4748-8c02-f6e04a71e4f2-kube-api-access-gpfkb\") pod \"dnsmasq-dns-675f4bcbfc-bphzz\" (UID: \"07683aaf-0d62-4748-8c02-f6e04a71e4f2\") " pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.632295 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.646493 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-g5bhp\" (UID: \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.646860 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-config\") pod \"dnsmasq-dns-78dd6ddcc-g5bhp\" (UID: \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.646893 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbgzc\" (UniqueName: \"kubernetes.io/projected/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-kube-api-access-mbgzc\") pod \"dnsmasq-dns-78dd6ddcc-g5bhp\" (UID: \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.648245 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-g5bhp\" (UID: \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.648310 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-config\") pod \"dnsmasq-dns-78dd6ddcc-g5bhp\" (UID: \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.665349 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbgzc\" (UniqueName: \"kubernetes.io/projected/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-kube-api-access-mbgzc\") pod \"dnsmasq-dns-78dd6ddcc-g5bhp\" (UID: \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\") " pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:20 crc kubenswrapper[4938]: I1122 10:55:20.704169 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:21 crc kubenswrapper[4938]: I1122 10:55:21.116788 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-bphzz"] Nov 22 10:55:21 crc kubenswrapper[4938]: I1122 10:55:21.122555 4938 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 10:55:21 crc kubenswrapper[4938]: I1122 10:55:21.131684 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" event={"ID":"07683aaf-0d62-4748-8c02-f6e04a71e4f2","Type":"ContainerStarted","Data":"823165ca8ae7fccc5020c37af2493c4f2b17223b22c58b6c7280c55e40fbe375"} Nov 22 10:55:21 crc kubenswrapper[4938]: I1122 10:55:21.176543 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-g5bhp"] Nov 22 10:55:21 crc kubenswrapper[4938]: W1122 10:55:21.179339 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff5a2928_a2b7_4bed_9a9e_4f9699f47a0c.slice/crio-57f011d098b4e8dd14482f23efe3b8cd30286370b53855fad0d95b8af198bdb8 WatchSource:0}: Error finding container 57f011d098b4e8dd14482f23efe3b8cd30286370b53855fad0d95b8af198bdb8: Status 404 returned error can't find the container with id 57f011d098b4e8dd14482f23efe3b8cd30286370b53855fad0d95b8af198bdb8 Nov 22 10:55:22 crc kubenswrapper[4938]: I1122 10:55:22.139511 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" event={"ID":"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c","Type":"ContainerStarted","Data":"57f011d098b4e8dd14482f23efe3b8cd30286370b53855fad0d95b8af198bdb8"} Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.514209 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-bphzz"] Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.546416 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kn262"] Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.547843 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.559904 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kn262"] Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.701776 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e7894a2-70db-4b4b-8a34-f5630794ef18-dns-svc\") pod \"dnsmasq-dns-666b6646f7-kn262\" (UID: \"2e7894a2-70db-4b4b-8a34-f5630794ef18\") " pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.701852 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fl2dd\" (UniqueName: \"kubernetes.io/projected/2e7894a2-70db-4b4b-8a34-f5630794ef18-kube-api-access-fl2dd\") pod \"dnsmasq-dns-666b6646f7-kn262\" (UID: \"2e7894a2-70db-4b4b-8a34-f5630794ef18\") " pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.701876 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e7894a2-70db-4b4b-8a34-f5630794ef18-config\") pod \"dnsmasq-dns-666b6646f7-kn262\" (UID: \"2e7894a2-70db-4b4b-8a34-f5630794ef18\") " pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.803028 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fl2dd\" (UniqueName: \"kubernetes.io/projected/2e7894a2-70db-4b4b-8a34-f5630794ef18-kube-api-access-fl2dd\") pod \"dnsmasq-dns-666b6646f7-kn262\" (UID: \"2e7894a2-70db-4b4b-8a34-f5630794ef18\") " pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.803095 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e7894a2-70db-4b4b-8a34-f5630794ef18-config\") pod \"dnsmasq-dns-666b6646f7-kn262\" (UID: \"2e7894a2-70db-4b4b-8a34-f5630794ef18\") " pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.803194 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e7894a2-70db-4b4b-8a34-f5630794ef18-dns-svc\") pod \"dnsmasq-dns-666b6646f7-kn262\" (UID: \"2e7894a2-70db-4b4b-8a34-f5630794ef18\") " pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.804115 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e7894a2-70db-4b4b-8a34-f5630794ef18-dns-svc\") pod \"dnsmasq-dns-666b6646f7-kn262\" (UID: \"2e7894a2-70db-4b4b-8a34-f5630794ef18\") " pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.804138 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e7894a2-70db-4b4b-8a34-f5630794ef18-config\") pod \"dnsmasq-dns-666b6646f7-kn262\" (UID: \"2e7894a2-70db-4b4b-8a34-f5630794ef18\") " pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.825359 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fl2dd\" (UniqueName: 
\"kubernetes.io/projected/2e7894a2-70db-4b4b-8a34-f5630794ef18-kube-api-access-fl2dd\") pod \"dnsmasq-dns-666b6646f7-kn262\" (UID: \"2e7894a2-70db-4b4b-8a34-f5630794ef18\") " pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.857310 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-g5bhp"] Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.869537 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.891268 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-lqtjd"] Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.892485 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:55:23 crc kubenswrapper[4938]: I1122 10:55:23.939529 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-lqtjd"] Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.013951 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-lqtjd\" (UID: \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\") " pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.014037 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbbpb\" (UniqueName: \"kubernetes.io/projected/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-kube-api-access-lbbpb\") pod \"dnsmasq-dns-57d769cc4f-lqtjd\" (UID: \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\") " pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.014076 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-config\") pod \"dnsmasq-dns-57d769cc4f-lqtjd\" (UID: \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\") " pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.116770 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-lqtjd\" (UID: \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\") " pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.116890 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbbpb\" (UniqueName: \"kubernetes.io/projected/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-kube-api-access-lbbpb\") pod \"dnsmasq-dns-57d769cc4f-lqtjd\" (UID: \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\") " pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.116940 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-config\") pod \"dnsmasq-dns-57d769cc4f-lqtjd\" (UID: \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\") " pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.117810 4938 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-config\") pod \"dnsmasq-dns-57d769cc4f-lqtjd\" (UID: \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\") " pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.118361 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-lqtjd\" (UID: \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\") " pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.144796 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbbpb\" (UniqueName: \"kubernetes.io/projected/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-kube-api-access-lbbpb\") pod \"dnsmasq-dns-57d769cc4f-lqtjd\" (UID: \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\") " pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.261384 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.324070 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kn262"] Nov 22 10:55:24 crc kubenswrapper[4938]: W1122 10:55:24.336030 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e7894a2_70db_4b4b_8a34_f5630794ef18.slice/crio-7329d362b1c94d6a07045a9d9d92a1f854a2be06b1113a7750d3b7ca1ff30057 WatchSource:0}: Error finding container 7329d362b1c94d6a07045a9d9d92a1f854a2be06b1113a7750d3b7ca1ff30057: Status 404 returned error can't find the container with id 7329d362b1c94d6a07045a9d9d92a1f854a2be06b1113a7750d3b7ca1ff30057 Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.667198 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-lqtjd"] Nov 22 10:55:24 crc kubenswrapper[4938]: W1122 10:55:24.670055 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod523d2f5d_1b2a_4b67_85ac_838c0435ecb2.slice/crio-4914a10063bb0846d9010eac7e2b574bed168ea2a368c71cfacc0164785836c3 WatchSource:0}: Error finding container 4914a10063bb0846d9010eac7e2b574bed168ea2a368c71cfacc0164785836c3: Status 404 returned error can't find the container with id 4914a10063bb0846d9010eac7e2b574bed168ea2a368c71cfacc0164785836c3 Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.689403 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.690886 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.695427 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.695644 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.695711 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.695892 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.695904 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.696047 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-f846b" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.696075 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.702475 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.831005 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-config-data\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.831055 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.831081 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c85fce90-13b6-40ad-a1ec-f0bf5168038e-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.831099 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.831129 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.831146 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod 
\"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.831193 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c85fce90-13b6-40ad-a1ec-f0bf5168038e-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.831232 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp4n9\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-kube-api-access-qp4n9\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.831259 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.831273 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.831297 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.932796 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.932860 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c85fce90-13b6-40ad-a1ec-f0bf5168038e-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.932880 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.932900 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc 
kubenswrapper[4938]: I1122 10:55:24.932997 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.933064 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c85fce90-13b6-40ad-a1ec-f0bf5168038e-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.933107 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp4n9\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-kube-api-access-qp4n9\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.933140 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.933158 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.933181 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.933200 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-config-data\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.933963 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.934004 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.934030 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.934157 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-config-data\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.934218 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.934340 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.938716 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c85fce90-13b6-40ad-a1ec-f0bf5168038e-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.939087 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.939222 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.939658 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c85fce90-13b6-40ad-a1ec-f0bf5168038e-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.951571 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp4n9\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-kube-api-access-qp4n9\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4938]: I1122 10:55:24.962980 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.001841 4938 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.004866 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.016442 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.016551 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.016607 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.016677 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.016757 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.016816 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.020886 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-g42d6" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.027454 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.028973 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.136198 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.136253 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.136293 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.136405 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.136518 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/18ebf838-be34-4ba1-b8f0-031a5477ca78-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.136547 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/18ebf838-be34-4ba1-b8f0-031a5477ca78-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.136566 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcb7l\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-kube-api-access-dcb7l\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.136614 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.136711 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.136753 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.136885 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.170732 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-kn262" event={"ID":"2e7894a2-70db-4b4b-8a34-f5630794ef18","Type":"ContainerStarted","Data":"7329d362b1c94d6a07045a9d9d92a1f854a2be06b1113a7750d3b7ca1ff30057"} Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.172766 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" event={"ID":"523d2f5d-1b2a-4b67-85ac-838c0435ecb2","Type":"ContainerStarted","Data":"4914a10063bb0846d9010eac7e2b574bed168ea2a368c71cfacc0164785836c3"} Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.241818 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.241890 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.241939 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.241972 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.241996 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.242030 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.242049 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.242085 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/18ebf838-be34-4ba1-b8f0-031a5477ca78-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.242099 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/18ebf838-be34-4ba1-b8f0-031a5477ca78-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.242114 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcb7l\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-kube-api-access-dcb7l\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.242131 4938 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.242398 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.243041 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.243200 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.243262 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.243481 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.243698 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.246682 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.246849 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/18ebf838-be34-4ba1-b8f0-031a5477ca78-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.247159 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.247380 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/18ebf838-be34-4ba1-b8f0-031a5477ca78-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.261856 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcb7l\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-kube-api-access-dcb7l\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.279596 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.341556 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.520612 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 10:55:25 crc kubenswrapper[4938]: I1122 10:55:25.798539 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 10:55:25 crc kubenswrapper[4938]: W1122 10:55:25.806568 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod18ebf838_be34_4ba1_b8f0_031a5477ca78.slice/crio-899bad9c334e83c81503618b39d5978b3b51e65bb0e20c31e969cf3b93a0e44d WatchSource:0}: Error finding container 899bad9c334e83c81503618b39d5978b3b51e65bb0e20c31e969cf3b93a0e44d: Status 404 returned error can't find the container with id 899bad9c334e83c81503618b39d5978b3b51e65bb0e20c31e969cf3b93a0e44d Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.180222 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"18ebf838-be34-4ba1-b8f0-031a5477ca78","Type":"ContainerStarted","Data":"899bad9c334e83c81503618b39d5978b3b51e65bb0e20c31e969cf3b93a0e44d"} Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.181895 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c85fce90-13b6-40ad-a1ec-f0bf5168038e","Type":"ContainerStarted","Data":"a0edf03f75c3983853acde238cca29fecc048964579a3198fbc12bdd6fce816b"} Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.492837 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.494350 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.499121 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.499267 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-tzdwf" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.499211 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.500820 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.501964 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.509460 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.522236 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.661550 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9kgd\" (UniqueName: \"kubernetes.io/projected/1e2caea9-4690-48c1-909a-05ba8dbf34d4-kube-api-access-b9kgd\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.661905 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e2caea9-4690-48c1-909a-05ba8dbf34d4-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.661947 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2caea9-4690-48c1-909a-05ba8dbf34d4-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.661965 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1e2caea9-4690-48c1-909a-05ba8dbf34d4-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.662016 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1e2caea9-4690-48c1-909a-05ba8dbf34d4-config-data-default\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.662053 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e2caea9-4690-48c1-909a-05ba8dbf34d4-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " 
pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.662077 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/1e2caea9-4690-48c1-909a-05ba8dbf34d4-secrets\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.662107 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.662123 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1e2caea9-4690-48c1-909a-05ba8dbf34d4-kolla-config\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.763387 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.763811 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1e2caea9-4690-48c1-909a-05ba8dbf34d4-kolla-config\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.763747 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.764036 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9kgd\" (UniqueName: \"kubernetes.io/projected/1e2caea9-4690-48c1-909a-05ba8dbf34d4-kube-api-access-b9kgd\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.764085 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e2caea9-4690-48c1-909a-05ba8dbf34d4-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.764108 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2caea9-4690-48c1-909a-05ba8dbf34d4-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.764126 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" 
(UniqueName: \"kubernetes.io/empty-dir/1e2caea9-4690-48c1-909a-05ba8dbf34d4-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.764193 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1e2caea9-4690-48c1-909a-05ba8dbf34d4-config-data-default\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.764244 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e2caea9-4690-48c1-909a-05ba8dbf34d4-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.764266 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/1e2caea9-4690-48c1-909a-05ba8dbf34d4-secrets\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.764622 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1e2caea9-4690-48c1-909a-05ba8dbf34d4-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.764789 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1e2caea9-4690-48c1-909a-05ba8dbf34d4-kolla-config\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.765381 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1e2caea9-4690-48c1-909a-05ba8dbf34d4-config-data-default\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.767487 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e2caea9-4690-48c1-909a-05ba8dbf34d4-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.772760 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/1e2caea9-4690-48c1-909a-05ba8dbf34d4-secrets\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.783046 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2caea9-4690-48c1-909a-05ba8dbf34d4-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.786880 
4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e2caea9-4690-48c1-909a-05ba8dbf34d4-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.787051 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9kgd\" (UniqueName: \"kubernetes.io/projected/1e2caea9-4690-48c1-909a-05ba8dbf34d4-kube-api-access-b9kgd\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.798836 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"1e2caea9-4690-48c1-909a-05ba8dbf34d4\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4938]: I1122 10:55:26.813505 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.276513 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 22 10:55:27 crc kubenswrapper[4938]: W1122 10:55:27.292451 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e2caea9_4690_48c1_909a_05ba8dbf34d4.slice/crio-461e292493dc887f878972911a78ce74749cbe3f52818f4ed742e3d445c09a8c WatchSource:0}: Error finding container 461e292493dc887f878972911a78ce74749cbe3f52818f4ed742e3d445c09a8c: Status 404 returned error can't find the container with id 461e292493dc887f878972911a78ce74749cbe3f52818f4ed742e3d445c09a8c Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.802894 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.804494 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.809348 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-fss94" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.809793 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.810031 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.812243 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.816018 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.879393 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.879720 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.879753 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.879788 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.879817 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.879833 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.879989 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.880248 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.880306 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8lsh\" (UniqueName: \"kubernetes.io/projected/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-kube-api-access-g8lsh\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.981893 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.982067 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.982134 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.982218 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.982283 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.982304 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.982384 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-galera-tls-certs\") pod 
\"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.982463 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.982542 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8lsh\" (UniqueName: \"kubernetes.io/projected/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-kube-api-access-g8lsh\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.983837 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.983913 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.983956 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.984179 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.984593 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.990910 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4938]: I1122 10:55:27.991183 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " 
pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.013857 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8lsh\" (UniqueName: \"kubernetes.io/projected/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-kube-api-access-g8lsh\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.013966 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0404ed3a-da0f-4ba3-953b-e1f3dca9d53b-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.035017 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.067708 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.068897 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.074771 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.074795 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.075113 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-wz4ld" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.082596 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.132427 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.184780 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e219ab3-870c-4d69-99b6-79758b76a271-config-data\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.184831 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e219ab3-870c-4d69-99b6-79758b76a271-combined-ca-bundle\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.184860 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e219ab3-870c-4d69-99b6-79758b76a271-memcached-tls-certs\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.184900 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7e219ab3-870c-4d69-99b6-79758b76a271-kolla-config\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.185054 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8j9jn\" (UniqueName: \"kubernetes.io/projected/7e219ab3-870c-4d69-99b6-79758b76a271-kube-api-access-8j9jn\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.245229 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1e2caea9-4690-48c1-909a-05ba8dbf34d4","Type":"ContainerStarted","Data":"461e292493dc887f878972911a78ce74749cbe3f52818f4ed742e3d445c09a8c"} Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.303679 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e219ab3-870c-4d69-99b6-79758b76a271-config-data\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.303723 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e219ab3-870c-4d69-99b6-79758b76a271-combined-ca-bundle\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.303753 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e219ab3-870c-4d69-99b6-79758b76a271-memcached-tls-certs\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.303782 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/7e219ab3-870c-4d69-99b6-79758b76a271-kolla-config\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.303819 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8j9jn\" (UniqueName: \"kubernetes.io/projected/7e219ab3-870c-4d69-99b6-79758b76a271-kube-api-access-8j9jn\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.304890 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7e219ab3-870c-4d69-99b6-79758b76a271-config-data\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.305087 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7e219ab3-870c-4d69-99b6-79758b76a271-kolla-config\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.314809 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e219ab3-870c-4d69-99b6-79758b76a271-memcached-tls-certs\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.315717 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e219ab3-870c-4d69-99b6-79758b76a271-combined-ca-bundle\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.324166 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8j9jn\" (UniqueName: \"kubernetes.io/projected/7e219ab3-870c-4d69-99b6-79758b76a271-kube-api-access-8j9jn\") pod \"memcached-0\" (UID: \"7e219ab3-870c-4d69-99b6-79758b76a271\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.442786 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4938]: I1122 10:55:28.981165 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 22 10:55:29 crc kubenswrapper[4938]: W1122 10:55:29.007708 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e219ab3_870c_4d69_99b6_79758b76a271.slice/crio-dec6ab0dfacb926cba3c683032fae47d91499e9b69613d723453bba712f68077 WatchSource:0}: Error finding container dec6ab0dfacb926cba3c683032fae47d91499e9b69613d723453bba712f68077: Status 404 returned error can't find the container with id dec6ab0dfacb926cba3c683032fae47d91499e9b69613d723453bba712f68077 Nov 22 10:55:29 crc kubenswrapper[4938]: I1122 10:55:29.073577 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 22 10:55:29 crc kubenswrapper[4938]: I1122 10:55:29.271665 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"7e219ab3-870c-4d69-99b6-79758b76a271","Type":"ContainerStarted","Data":"dec6ab0dfacb926cba3c683032fae47d91499e9b69613d723453bba712f68077"} Nov 22 10:55:29 crc kubenswrapper[4938]: I1122 10:55:29.275301 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b","Type":"ContainerStarted","Data":"22b159d9ed1252955a5f37c21060ac910faf9af6fe78014c07cddc6098556d76"} Nov 22 10:55:30 crc kubenswrapper[4938]: I1122 10:55:30.065757 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:55:30 crc kubenswrapper[4938]: I1122 10:55:30.066660 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 10:55:30 crc kubenswrapper[4938]: I1122 10:55:30.088374 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:55:30 crc kubenswrapper[4938]: I1122 10:55:30.089468 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-btpdw" Nov 22 10:55:30 crc kubenswrapper[4938]: I1122 10:55:30.138462 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kb2f2\" (UniqueName: \"kubernetes.io/projected/15e6e3f1-ae9e-4a70-8342-74d6554ec24c-kube-api-access-kb2f2\") pod \"kube-state-metrics-0\" (UID: \"15e6e3f1-ae9e-4a70-8342-74d6554ec24c\") " pod="openstack/kube-state-metrics-0" Nov 22 10:55:30 crc kubenswrapper[4938]: I1122 10:55:30.240125 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kb2f2\" (UniqueName: \"kubernetes.io/projected/15e6e3f1-ae9e-4a70-8342-74d6554ec24c-kube-api-access-kb2f2\") pod \"kube-state-metrics-0\" (UID: \"15e6e3f1-ae9e-4a70-8342-74d6554ec24c\") " pod="openstack/kube-state-metrics-0" Nov 22 10:55:30 crc kubenswrapper[4938]: I1122 10:55:30.278766 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kb2f2\" (UniqueName: \"kubernetes.io/projected/15e6e3f1-ae9e-4a70-8342-74d6554ec24c-kube-api-access-kb2f2\") pod \"kube-state-metrics-0\" (UID: \"15e6e3f1-ae9e-4a70-8342-74d6554ec24c\") " pod="openstack/kube-state-metrics-0" Nov 22 10:55:30 crc kubenswrapper[4938]: I1122 10:55:30.426709 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 10:55:31 crc kubenswrapper[4938]: I1122 10:55:31.037412 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:55:31 crc kubenswrapper[4938]: I1122 10:55:31.355053 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"15e6e3f1-ae9e-4a70-8342-74d6554ec24c","Type":"ContainerStarted","Data":"57ac01815fc4b156d536faf55964bd2f26226c6e71b2fd5b5171639a6b60f8b1"} Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.761447 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.764716 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.768081 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-lcw4c" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.769354 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.771572 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.771753 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.772980 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.788205 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.804335 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49r8x\" (UniqueName: \"kubernetes.io/projected/e7f61b47-1155-4776-acf0-0cb9ea53af1a-kube-api-access-49r8x\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.804411 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7f61b47-1155-4776-acf0-0cb9ea53af1a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.804464 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7f61b47-1155-4776-acf0-0cb9ea53af1a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.809820 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7f61b47-1155-4776-acf0-0cb9ea53af1a-config\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.809892 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e7f61b47-1155-4776-acf0-0cb9ea53af1a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.810078 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7f61b47-1155-4776-acf0-0cb9ea53af1a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.810123 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7f61b47-1155-4776-acf0-0cb9ea53af1a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.810164 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.912161 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e7f61b47-1155-4776-acf0-0cb9ea53af1a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.912239 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7f61b47-1155-4776-acf0-0cb9ea53af1a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.912275 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7f61b47-1155-4776-acf0-0cb9ea53af1a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.912303 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.912342 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49r8x\" (UniqueName: \"kubernetes.io/projected/e7f61b47-1155-4776-acf0-0cb9ea53af1a-kube-api-access-49r8x\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.912369 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7f61b47-1155-4776-acf0-0cb9ea53af1a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " 
pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.912396 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7f61b47-1155-4776-acf0-0cb9ea53af1a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.912411 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7f61b47-1155-4776-acf0-0cb9ea53af1a-config\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.912716 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e7f61b47-1155-4776-acf0-0cb9ea53af1a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.913089 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.914484 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7f61b47-1155-4776-acf0-0cb9ea53af1a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.915039 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7f61b47-1155-4776-acf0-0cb9ea53af1a-config\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.919480 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7f61b47-1155-4776-acf0-0cb9ea53af1a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.919525 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7f61b47-1155-4776-acf0-0cb9ea53af1a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.929032 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7f61b47-1155-4776-acf0-0cb9ea53af1a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.931730 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49r8x\" (UniqueName: \"kubernetes.io/projected/e7f61b47-1155-4776-acf0-0cb9ea53af1a-kube-api-access-49r8x\") pod \"ovsdbserver-sb-0\" (UID: 
\"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:33 crc kubenswrapper[4938]: I1122 10:55:33.936334 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e7f61b47-1155-4776-acf0-0cb9ea53af1a\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.090222 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.474612 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-sq62w"] Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.475729 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.479521 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-895q7" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.484900 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.485258 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.487645 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-sq62w"] Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.519898 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-dg79z"] Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.521893 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-scripts\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.521953 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-combined-ca-bundle\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.521979 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-var-run-ovn\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.521998 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-var-run\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.522103 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-ovn-controller-tls-certs\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.522123 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-var-log-ovn\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.522160 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvdgc\" (UniqueName: \"kubernetes.io/projected/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-kube-api-access-nvdgc\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.522344 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.533078 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-dg79z"] Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623414 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-etc-ovs\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623500 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-var-run\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623542 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-scripts\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623567 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-var-log\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623599 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-combined-ca-bundle\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623632 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-var-run-ovn\") pod \"ovn-controller-sq62w\" (UID: 
\"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623654 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-var-run\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623700 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-scripts\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623785 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-ovn-controller-tls-certs\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623813 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-var-log-ovn\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623836 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-var-lib\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623872 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvdgc\" (UniqueName: \"kubernetes.io/projected/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-kube-api-access-nvdgc\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.623903 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcv9g\" (UniqueName: \"kubernetes.io/projected/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-kube-api-access-bcv9g\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.624253 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-var-run-ovn\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.624336 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-var-log-ovn\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.624358 
4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-var-run\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.626228 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-scripts\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.628255 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-combined-ca-bundle\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.628701 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-ovn-controller-tls-certs\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.638893 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvdgc\" (UniqueName: \"kubernetes.io/projected/a2ffb7f9-f83c-4e71-af53-3d116e260d8e-kube-api-access-nvdgc\") pod \"ovn-controller-sq62w\" (UID: \"a2ffb7f9-f83c-4e71-af53-3d116e260d8e\") " pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.725531 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-var-lib\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.725585 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcv9g\" (UniqueName: \"kubernetes.io/projected/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-kube-api-access-bcv9g\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.725611 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-etc-ovs\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.725646 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-var-run\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.725855 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-var-lib\") pod \"ovn-controller-ovs-dg79z\" (UID: 
\"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.725837 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-etc-ovs\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.725875 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-var-log\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.725969 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-var-log\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.726022 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-scripts\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.725826 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-var-run\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.727660 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-scripts\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.740559 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcv9g\" (UniqueName: \"kubernetes.io/projected/62cfd59e-5bd6-48ef-9990-1f29ec6d155a-kube-api-access-bcv9g\") pod \"ovn-controller-ovs-dg79z\" (UID: \"62cfd59e-5bd6-48ef-9990-1f29ec6d155a\") " pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.800429 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-sq62w" Nov 22 10:55:34 crc kubenswrapper[4938]: I1122 10:55:34.839386 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.419407 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.444638 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.444537 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.447971 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.448120 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-gbv7p" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.448344 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.448367 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.469690 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.469784 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.469867 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.470382 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.470548 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lgxp\" (UniqueName: \"kubernetes.io/projected/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-kube-api-access-2lgxp\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.470599 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-config\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.470719 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " 
pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.470776 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.572212 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.572567 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.572600 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.572646 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.572673 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lgxp\" (UniqueName: \"kubernetes.io/projected/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-kube-api-access-2lgxp\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.572693 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-config\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.572731 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.572765 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.572869 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.573346 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.573611 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-config\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.574335 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.578573 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.578792 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.583905 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.591434 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lgxp\" (UniqueName: \"kubernetes.io/projected/0e21a120-bf8c-43ad-b7fe-48e11f0a0545-kube-api-access-2lgxp\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.596010 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"0e21a120-bf8c-43ad-b7fe-48e11f0a0545\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:37 crc kubenswrapper[4938]: I1122 10:55:37.780290 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:45 crc kubenswrapper[4938]: E1122 10:55:45.218111 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 22 10:55:45 crc kubenswrapper[4938]: E1122 10:55:45.219377 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:DB_ROOT_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:DbRootPassword,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:secrets,ReadOnly:true,MountPath:/var/lib/secrets,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g8lsh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(0404ed3a-da0f-4ba3-953b-e1f3dca9d53b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:55:45 crc kubenswrapper[4938]: E1122 10:55:45.221224 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="0404ed3a-da0f-4ba3-953b-e1f3dca9d53b" Nov 22 10:55:45 crc kubenswrapper[4938]: E1122 10:55:45.272545 4938 log.go:32] "PullImage from image service failed" err="rpc 
error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 22 10:55:45 crc kubenswrapper[4938]: E1122 10:55:45.272759 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:DB_ROOT_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:DbRootPassword,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:secrets,ReadOnly:true,MountPath:/var/lib/secrets,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b9kgd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(1e2caea9-4690-48c1-909a-05ba8dbf34d4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:55:45 crc kubenswrapper[4938]: E1122 10:55:45.274049 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="1e2caea9-4690-48c1-909a-05ba8dbf34d4" Nov 22 10:55:45 crc kubenswrapper[4938]: E1122 10:55:45.461772 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" 
pod="openstack/openstack-cell1-galera-0" podUID="0404ed3a-da0f-4ba3-953b-e1f3dca9d53b" Nov 22 10:55:45 crc kubenswrapper[4938]: E1122 10:55:45.475980 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="1e2caea9-4690-48c1-909a-05ba8dbf34d4" Nov 22 10:55:56 crc kubenswrapper[4938]: E1122 10:55:56.813572 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Nov 22 10:55:56 crc kubenswrapper[4938]: E1122 10:55:56.814249 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n64bh576h57dh8dh647hcch65ch58dh5bbh594h88h57bh98hch59bh645h5b9h688h546h5f6h5ffh666h5d6hb6h55hbdh67fh5b9h576h5b9h696h5cbq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8j9jn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 
},Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(7e219ab3-870c-4d69-99b6-79758b76a271): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:55:56 crc kubenswrapper[4938]: E1122 10:55:56.815830 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="7e219ab3-870c-4d69-99b6-79758b76a271" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.006670 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.006941 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dcb7l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(18ebf838-be34-4ba1-b8f0-031a5477ca78): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.008106 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="18ebf838-be34-4ba1-b8f0-031a5477ca78" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.518195 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.518802 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gpfkb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-bphzz_openstack(07683aaf-0d62-4748-8c02-f6e04a71e4f2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.519984 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" podUID="07683aaf-0d62-4748-8c02-f6e04a71e4f2" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.521452 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.521661 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fl2dd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-kn262_openstack(2e7894a2-70db-4b4b-8a34-f5630794ef18): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.523320 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-kn262" podUID="2e7894a2-70db-4b4b-8a34-f5630794ef18" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.535938 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.536110 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mbgzc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-g5bhp_openstack(ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.541026 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" podUID="ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.568086 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-kn262" podUID="2e7894a2-70db-4b4b-8a34-f5630794ef18" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.568480 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="7e219ab3-870c-4d69-99b6-79758b76a271" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.614334 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.614471 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c 
dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lbbpb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-lqtjd_openstack(523d2f5d-1b2a-4b67-85ac-838c0435ecb2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:55:57 crc kubenswrapper[4938]: E1122 10:55:57.615685 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" podUID="523d2f5d-1b2a-4b67-85ac-838c0435ecb2" Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.009622 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-dg79z"] Nov 22 10:55:58 crc kubenswrapper[4938]: W1122 10:55:58.095024 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62cfd59e_5bd6_48ef_9990_1f29ec6d155a.slice/crio-1e4bb83bfda7e05939c6738831916143821b69d6ac9393f2175526faf1e9114b WatchSource:0}: Error finding container 1e4bb83bfda7e05939c6738831916143821b69d6ac9393f2175526faf1e9114b: Status 404 returned error can't find the container with id 1e4bb83bfda7e05939c6738831916143821b69d6ac9393f2175526faf1e9114b Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.101319 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-sq62w"] Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.165802 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.168255 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.215092 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpfkb\" (UniqueName: \"kubernetes.io/projected/07683aaf-0d62-4748-8c02-f6e04a71e4f2-kube-api-access-gpfkb\") pod \"07683aaf-0d62-4748-8c02-f6e04a71e4f2\" (UID: \"07683aaf-0d62-4748-8c02-f6e04a71e4f2\") " Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.215156 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07683aaf-0d62-4748-8c02-f6e04a71e4f2-config\") pod \"07683aaf-0d62-4748-8c02-f6e04a71e4f2\" (UID: \"07683aaf-0d62-4748-8c02-f6e04a71e4f2\") " Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.215810 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07683aaf-0d62-4748-8c02-f6e04a71e4f2-config" (OuterVolumeSpecName: "config") pod "07683aaf-0d62-4748-8c02-f6e04a71e4f2" (UID: "07683aaf-0d62-4748-8c02-f6e04a71e4f2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.221119 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07683aaf-0d62-4748-8c02-f6e04a71e4f2-kube-api-access-gpfkb" (OuterVolumeSpecName: "kube-api-access-gpfkb") pod "07683aaf-0d62-4748-8c02-f6e04a71e4f2" (UID: "07683aaf-0d62-4748-8c02-f6e04a71e4f2"). InnerVolumeSpecName "kube-api-access-gpfkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:55:58 crc kubenswrapper[4938]: W1122 10:55:58.266250 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7f61b47_1155_4776_acf0_0cb9ea53af1a.slice/crio-3ef78160112d694a5d3a674e476ecd9dfe49c318b47d7932db60874c7b63a416 WatchSource:0}: Error finding container 3ef78160112d694a5d3a674e476ecd9dfe49c318b47d7932db60874c7b63a416: Status 404 returned error can't find the container with id 3ef78160112d694a5d3a674e476ecd9dfe49c318b47d7932db60874c7b63a416 Nov 22 10:55:58 crc kubenswrapper[4938]: W1122 10:55:58.268941 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2ffb7f9_f83c_4e71_af53_3d116e260d8e.slice/crio-4f2f61c51348ad7e8678824a57e6cf9d7576ae6f87c45236e9ac28605ae18013 WatchSource:0}: Error finding container 4f2f61c51348ad7e8678824a57e6cf9d7576ae6f87c45236e9ac28605ae18013: Status 404 returned error can't find the container with id 4f2f61c51348ad7e8678824a57e6cf9d7576ae6f87c45236e9ac28605ae18013 Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.317662 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpfkb\" (UniqueName: \"kubernetes.io/projected/07683aaf-0d62-4748-8c02-f6e04a71e4f2-kube-api-access-gpfkb\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.317698 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07683aaf-0d62-4748-8c02-f6e04a71e4f2-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:58 crc kubenswrapper[4938]: E1122 10:55:58.334924 4938 log.go:32] "PullImage from image service failed" err="rpc 
error: code = Canceled desc = copying system image from manifest list: copying layer: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 22 10:55:58 crc kubenswrapper[4938]: E1122 10:55:58.334981 4938 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying layer: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 22 10:55:58 crc kubenswrapper[4938]: E1122 10:55:58.335210 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kb2f2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(15e6e3f1-ae9e-4a70-8342-74d6554ec24c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying layer: context canceled" logger="UnhandledError" Nov 22 10:55:58 crc kubenswrapper[4938]: E1122 10:55:58.336444 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying layer: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="15e6e3f1-ae9e-4a70-8342-74d6554ec24c" Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.572636 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dg79z" event={"ID":"62cfd59e-5bd6-48ef-9990-1f29ec6d155a","Type":"ContainerStarted","Data":"1e4bb83bfda7e05939c6738831916143821b69d6ac9393f2175526faf1e9114b"} Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.574065 
4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1e2caea9-4690-48c1-909a-05ba8dbf34d4","Type":"ContainerStarted","Data":"0b757483e2c85632fa0fd837f05823dcee7043b71909bf1bd920259aab77d294"} Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.575734 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" event={"ID":"07683aaf-0d62-4748-8c02-f6e04a71e4f2","Type":"ContainerDied","Data":"823165ca8ae7fccc5020c37af2493c4f2b17223b22c58b6c7280c55e40fbe375"} Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.575802 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-bphzz" Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.579047 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e7f61b47-1155-4776-acf0-0cb9ea53af1a","Type":"ContainerStarted","Data":"3ef78160112d694a5d3a674e476ecd9dfe49c318b47d7932db60874c7b63a416"} Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.580573 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sq62w" event={"ID":"a2ffb7f9-f83c-4e71-af53-3d116e260d8e","Type":"ContainerStarted","Data":"4f2f61c51348ad7e8678824a57e6cf9d7576ae6f87c45236e9ac28605ae18013"} Nov 22 10:55:58 crc kubenswrapper[4938]: E1122 10:55:58.581760 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" podUID="523d2f5d-1b2a-4b67-85ac-838c0435ecb2" Nov 22 10:55:58 crc kubenswrapper[4938]: E1122 10:55:58.584674 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="15e6e3f1-ae9e-4a70-8342-74d6554ec24c" Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.685033 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-bphzz"] Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.690631 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-bphzz"] Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.903241 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:58 crc kubenswrapper[4938]: I1122 10:55:58.936098 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 22 10:55:58 crc kubenswrapper[4938]: W1122 10:55:58.944705 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0e21a120_bf8c_43ad_b7fe_48e11f0a0545.slice/crio-d845d223eb56e5b33d2e296377b2b0133c96fce77af77072517055dcb7124393 WatchSource:0}: Error finding container d845d223eb56e5b33d2e296377b2b0133c96fce77af77072517055dcb7124393: Status 404 returned error can't find the container with id d845d223eb56e5b33d2e296377b2b0133c96fce77af77072517055dcb7124393 Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.029894 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-dns-svc\") pod \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\" (UID: \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\") " Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.029975 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-config\") pod \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\" (UID: \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\") " Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.030030 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbgzc\" (UniqueName: \"kubernetes.io/projected/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-kube-api-access-mbgzc\") pod \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\" (UID: \"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c\") " Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.030396 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-config" (OuterVolumeSpecName: "config") pod "ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c" (UID: "ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.030429 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c" (UID: "ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.034664 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-kube-api-access-mbgzc" (OuterVolumeSpecName: "kube-api-access-mbgzc") pod "ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c" (UID: "ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c"). InnerVolumeSpecName "kube-api-access-mbgzc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.131828 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.131870 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbgzc\" (UniqueName: \"kubernetes.io/projected/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-kube-api-access-mbgzc\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.131884 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.587850 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"0e21a120-bf8c-43ad-b7fe-48e11f0a0545","Type":"ContainerStarted","Data":"d845d223eb56e5b33d2e296377b2b0133c96fce77af77072517055dcb7124393"} Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.588720 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" event={"ID":"ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c","Type":"ContainerDied","Data":"57f011d098b4e8dd14482f23efe3b8cd30286370b53855fad0d95b8af198bdb8"} Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.588779 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-g5bhp" Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.658627 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-g5bhp"] Nov 22 10:55:59 crc kubenswrapper[4938]: I1122 10:55:59.664955 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-g5bhp"] Nov 22 10:56:00 crc kubenswrapper[4938]: I1122 10:56:00.460246 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07683aaf-0d62-4748-8c02-f6e04a71e4f2" path="/var/lib/kubelet/pods/07683aaf-0d62-4748-8c02-f6e04a71e4f2/volumes" Nov 22 10:56:00 crc kubenswrapper[4938]: I1122 10:56:00.461044 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c" path="/var/lib/kubelet/pods/ff5a2928-a2b7-4bed-9a9e-4f9699f47a0c/volumes" Nov 22 10:56:00 crc kubenswrapper[4938]: I1122 10:56:00.596759 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c85fce90-13b6-40ad-a1ec-f0bf5168038e","Type":"ContainerStarted","Data":"ba096eb02091d8c3a9903e2e267ae7540fdc7f45825b897f8f3fd5cde5794d36"} Nov 22 10:56:00 crc kubenswrapper[4938]: I1122 10:56:00.598538 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"18ebf838-be34-4ba1-b8f0-031a5477ca78","Type":"ContainerStarted","Data":"a458a922a4d183ce4cbc3967e62865f46c22904f37a18ec220c88cbfbbec3652"} Nov 22 10:56:02 crc kubenswrapper[4938]: I1122 10:56:02.615685 4938 generic.go:334] "Generic (PLEG): container finished" podID="62cfd59e-5bd6-48ef-9990-1f29ec6d155a" containerID="f276ccadba64bca0a9ee9b4aa6c9290f9951642e772108bf988f14c8117939ac" exitCode=0 Nov 22 10:56:02 crc kubenswrapper[4938]: I1122 10:56:02.615975 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dg79z" 
event={"ID":"62cfd59e-5bd6-48ef-9990-1f29ec6d155a","Type":"ContainerDied","Data":"f276ccadba64bca0a9ee9b4aa6c9290f9951642e772108bf988f14c8117939ac"} Nov 22 10:56:02 crc kubenswrapper[4938]: I1122 10:56:02.619528 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b","Type":"ContainerStarted","Data":"65ba8fdff183227faae063b7dc62edffdc4f0fb3b09978ff25151a97fe27a62c"} Nov 22 10:56:02 crc kubenswrapper[4938]: I1122 10:56:02.622501 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"0e21a120-bf8c-43ad-b7fe-48e11f0a0545","Type":"ContainerStarted","Data":"145601272e17f5784162b484fd72748b766afbcd9a7fadf7e82e1adc5ca3471f"} Nov 22 10:56:02 crc kubenswrapper[4938]: I1122 10:56:02.624026 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e7f61b47-1155-4776-acf0-0cb9ea53af1a","Type":"ContainerStarted","Data":"efd612cd1e8985dd3273ad240d1801783feef066f6424eef604bb2cb72142f4d"} Nov 22 10:56:02 crc kubenswrapper[4938]: I1122 10:56:02.625611 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sq62w" event={"ID":"a2ffb7f9-f83c-4e71-af53-3d116e260d8e","Type":"ContainerStarted","Data":"a9a7ec7eb7b0029f80793eea07720f286a174268f358711fa03a28282ea12e72"} Nov 22 10:56:02 crc kubenswrapper[4938]: I1122 10:56:02.625868 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-sq62w" Nov 22 10:56:03 crc kubenswrapper[4938]: I1122 10:56:03.634716 4938 generic.go:334] "Generic (PLEG): container finished" podID="1e2caea9-4690-48c1-909a-05ba8dbf34d4" containerID="0b757483e2c85632fa0fd837f05823dcee7043b71909bf1bd920259aab77d294" exitCode=0 Nov 22 10:56:03 crc kubenswrapper[4938]: I1122 10:56:03.634797 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1e2caea9-4690-48c1-909a-05ba8dbf34d4","Type":"ContainerDied","Data":"0b757483e2c85632fa0fd837f05823dcee7043b71909bf1bd920259aab77d294"} Nov 22 10:56:03 crc kubenswrapper[4938]: I1122 10:56:03.640273 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dg79z" event={"ID":"62cfd59e-5bd6-48ef-9990-1f29ec6d155a","Type":"ContainerStarted","Data":"17164fcdb4b3aba4fef9f9b7c778f6796fbf0fcf6efce496579806dc7060b5b6"} Nov 22 10:56:03 crc kubenswrapper[4938]: I1122 10:56:03.640310 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:56:03 crc kubenswrapper[4938]: I1122 10:56:03.640321 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dg79z" event={"ID":"62cfd59e-5bd6-48ef-9990-1f29ec6d155a","Type":"ContainerStarted","Data":"555c9e9d3f951f21aac4517f13aba79fac808a1608c1794d4098e0fac86ce69c"} Nov 22 10:56:03 crc kubenswrapper[4938]: I1122 10:56:03.640341 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:56:03 crc kubenswrapper[4938]: I1122 10:56:03.656513 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-sq62w" podStartSLOduration=26.249021154 podStartE2EDuration="29.656498064s" podCreationTimestamp="2025-11-22 10:55:34 +0000 UTC" firstStartedPulling="2025-11-22 10:55:58.270964472 +0000 UTC m=+1090.738801871" lastFinishedPulling="2025-11-22 10:56:01.678441392 +0000 UTC m=+1094.146278781" observedRunningTime="2025-11-22 
10:56:02.679176408 +0000 UTC m=+1095.147013807" watchObservedRunningTime="2025-11-22 10:56:03.656498064 +0000 UTC m=+1096.124335463" Nov 22 10:56:03 crc kubenswrapper[4938]: I1122 10:56:03.677028 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-dg79z" podStartSLOduration=26.114038862 podStartE2EDuration="29.677012242s" podCreationTimestamp="2025-11-22 10:55:34 +0000 UTC" firstStartedPulling="2025-11-22 10:55:58.098268998 +0000 UTC m=+1090.566106397" lastFinishedPulling="2025-11-22 10:56:01.661242378 +0000 UTC m=+1094.129079777" observedRunningTime="2025-11-22 10:56:03.673142424 +0000 UTC m=+1096.140979833" watchObservedRunningTime="2025-11-22 10:56:03.677012242 +0000 UTC m=+1096.144849641" Nov 22 10:56:05 crc kubenswrapper[4938]: I1122 10:56:05.654125 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e7f61b47-1155-4776-acf0-0cb9ea53af1a","Type":"ContainerStarted","Data":"25ef4a6864b5505b3d462b63cbdd517e1e5ee67fb4fafeac7c2a5e7fe7e06223"} Nov 22 10:56:05 crc kubenswrapper[4938]: I1122 10:56:05.656357 4938 generic.go:334] "Generic (PLEG): container finished" podID="0404ed3a-da0f-4ba3-953b-e1f3dca9d53b" containerID="65ba8fdff183227faae063b7dc62edffdc4f0fb3b09978ff25151a97fe27a62c" exitCode=0 Nov 22 10:56:05 crc kubenswrapper[4938]: I1122 10:56:05.656450 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b","Type":"ContainerDied","Data":"65ba8fdff183227faae063b7dc62edffdc4f0fb3b09978ff25151a97fe27a62c"} Nov 22 10:56:05 crc kubenswrapper[4938]: I1122 10:56:05.658396 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1e2caea9-4690-48c1-909a-05ba8dbf34d4","Type":"ContainerStarted","Data":"46c21e622781d3b27bd3fe92da725e1816a08f722268d4df767295d2b85acfd8"} Nov 22 10:56:05 crc kubenswrapper[4938]: I1122 10:56:05.661278 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"0e21a120-bf8c-43ad-b7fe-48e11f0a0545","Type":"ContainerStarted","Data":"f30c7c3b7ba745d642342febcc1ef06e86b6508a46a8b782c00f8d46bb027d63"} Nov 22 10:56:05 crc kubenswrapper[4938]: I1122 10:56:05.679077 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=27.182258739 podStartE2EDuration="33.679056181s" podCreationTimestamp="2025-11-22 10:55:32 +0000 UTC" firstStartedPulling="2025-11-22 10:55:58.269396292 +0000 UTC m=+1090.737233691" lastFinishedPulling="2025-11-22 10:56:04.766193734 +0000 UTC m=+1097.234031133" observedRunningTime="2025-11-22 10:56:05.672273829 +0000 UTC m=+1098.140111278" watchObservedRunningTime="2025-11-22 10:56:05.679056181 +0000 UTC m=+1098.146893590" Nov 22 10:56:05 crc kubenswrapper[4938]: I1122 10:56:05.738738 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=9.651176461 podStartE2EDuration="40.738720498s" podCreationTimestamp="2025-11-22 10:55:25 +0000 UTC" firstStartedPulling="2025-11-22 10:55:27.294137342 +0000 UTC m=+1059.761974741" lastFinishedPulling="2025-11-22 10:55:58.381681379 +0000 UTC m=+1090.849518778" observedRunningTime="2025-11-22 10:56:05.713487091 +0000 UTC m=+1098.181324500" watchObservedRunningTime="2025-11-22 10:56:05.738720498 +0000 UTC m=+1098.206557907" Nov 22 10:56:05 crc kubenswrapper[4938]: I1122 10:56:05.742332 4938 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=23.934160197 podStartE2EDuration="29.74232459s" podCreationTimestamp="2025-11-22 10:55:36 +0000 UTC" firstStartedPulling="2025-11-22 10:55:58.951204209 +0000 UTC m=+1091.419041608" lastFinishedPulling="2025-11-22 10:56:04.759368602 +0000 UTC m=+1097.227206001" observedRunningTime="2025-11-22 10:56:05.732702956 +0000 UTC m=+1098.200540375" watchObservedRunningTime="2025-11-22 10:56:05.74232459 +0000 UTC m=+1098.210161989" Nov 22 10:56:06 crc kubenswrapper[4938]: I1122 10:56:06.671091 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0404ed3a-da0f-4ba3-953b-e1f3dca9d53b","Type":"ContainerStarted","Data":"38e6db14fb1061dfcc9b2f4b5d03e3c277b349c6c37d3f0c2f719a87a55881c0"} Nov 22 10:56:06 crc kubenswrapper[4938]: I1122 10:56:06.693921 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371996.160887 podStartE2EDuration="40.693888903s" podCreationTimestamp="2025-11-22 10:55:26 +0000 UTC" firstStartedPulling="2025-11-22 10:55:29.114823928 +0000 UTC m=+1061.582661327" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:06.688798134 +0000 UTC m=+1099.156635583" watchObservedRunningTime="2025-11-22 10:56:06.693888903 +0000 UTC m=+1099.161726302" Nov 22 10:56:06 crc kubenswrapper[4938]: I1122 10:56:06.814442 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 22 10:56:06 crc kubenswrapper[4938]: I1122 10:56:06.814519 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 22 10:56:07 crc kubenswrapper[4938]: I1122 10:56:07.090495 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 22 10:56:07 crc kubenswrapper[4938]: I1122 10:56:07.121978 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 22 10:56:07 crc kubenswrapper[4938]: I1122 10:56:07.680583 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 22 10:56:07 crc kubenswrapper[4938]: I1122 10:56:07.715990 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 22 10:56:07 crc kubenswrapper[4938]: I1122 10:56:07.781395 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 22 10:56:07 crc kubenswrapper[4938]: I1122 10:56:07.781676 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 22 10:56:07 crc kubenswrapper[4938]: I1122 10:56:07.826923 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 22 10:56:07 crc kubenswrapper[4938]: I1122 10:56:07.959266 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-lqtjd"] Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.003577 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-58mnt"] Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.005139 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.013086 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.025022 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-48rkq"] Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.026484 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.033197 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.035120 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-58mnt"] Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.042984 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-48rkq"] Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.076352 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-58mnt\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.076454 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-58mnt\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.076474 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-config\") pod \"dnsmasq-dns-6bc7876d45-58mnt\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.076539 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8dcj\" (UniqueName: \"kubernetes.io/projected/79aa7573-3064-49a8-8d09-bcb97db52e9d-kube-api-access-l8dcj\") pod \"dnsmasq-dns-6bc7876d45-58mnt\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.133242 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.133322 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.180378 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8dcj\" (UniqueName: \"kubernetes.io/projected/79aa7573-3064-49a8-8d09-bcb97db52e9d-kube-api-access-l8dcj\") pod \"dnsmasq-dns-6bc7876d45-58mnt\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.180741 4938 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-config\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.180774 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-58mnt\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.180863 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-58mnt\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.180889 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-config\") pod \"dnsmasq-dns-6bc7876d45-58mnt\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.180926 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-ovs-rundir\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.180976 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.181016 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-ovn-rundir\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.181036 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-combined-ca-bundle\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.181063 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7p25k\" (UniqueName: \"kubernetes.io/projected/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-kube-api-access-7p25k\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 
10:56:08.182369 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-58mnt\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.185018 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-config\") pod \"dnsmasq-dns-6bc7876d45-58mnt\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.182948 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-58mnt\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.212783 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8dcj\" (UniqueName: \"kubernetes.io/projected/79aa7573-3064-49a8-8d09-bcb97db52e9d-kube-api-access-l8dcj\") pod \"dnsmasq-dns-6bc7876d45-58mnt\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.284874 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-ovs-rundir\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.285007 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.285076 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-ovn-rundir\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.285099 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-combined-ca-bundle\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.285123 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7p25k\" (UniqueName: \"kubernetes.io/projected/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-kube-api-access-7p25k\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.285150 4938 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-config\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.286650 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-ovs-rundir\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.291074 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-ovn-rundir\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.291161 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-config\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.296557 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.299605 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-combined-ca-bundle\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.324313 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7p25k\" (UniqueName: \"kubernetes.io/projected/a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7-kube-api-access-7p25k\") pod \"ovn-controller-metrics-48rkq\" (UID: \"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7\") " pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.335250 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.352248 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-48rkq" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.429392 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kn262"] Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.430706 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.488212 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lbbpb\" (UniqueName: \"kubernetes.io/projected/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-kube-api-access-lbbpb\") pod \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\" (UID: \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\") " Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.488267 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-config\") pod \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\" (UID: \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\") " Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.488333 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-dns-svc\") pod \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\" (UID: \"523d2f5d-1b2a-4b67-85ac-838c0435ecb2\") " Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.489242 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "523d2f5d-1b2a-4b67-85ac-838c0435ecb2" (UID: "523d2f5d-1b2a-4b67-85ac-838c0435ecb2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.490279 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-config" (OuterVolumeSpecName: "config") pod "523d2f5d-1b2a-4b67-85ac-838c0435ecb2" (UID: "523d2f5d-1b2a-4b67-85ac-838c0435ecb2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.501126 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-kube-api-access-lbbpb" (OuterVolumeSpecName: "kube-api-access-lbbpb") pod "523d2f5d-1b2a-4b67-85ac-838c0435ecb2" (UID: "523d2f5d-1b2a-4b67-85ac-838c0435ecb2"). InnerVolumeSpecName "kube-api-access-lbbpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.532316 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-pzkcn"] Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.534974 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pzkcn"] Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.535106 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.541928 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.590060 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.590140 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-dns-svc\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.590168 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-config\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.590284 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.590384 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jqd6\" (UniqueName: \"kubernetes.io/projected/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-kube-api-access-5jqd6\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.590467 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lbbpb\" (UniqueName: \"kubernetes.io/projected/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-kube-api-access-lbbpb\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.590494 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.590507 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/523d2f5d-1b2a-4b67-85ac-838c0435ecb2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.690675 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" event={"ID":"523d2f5d-1b2a-4b67-85ac-838c0435ecb2","Type":"ContainerDied","Data":"4914a10063bb0846d9010eac7e2b574bed168ea2a368c71cfacc0164785836c3"} Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.690789 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-lqtjd" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.691721 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.691801 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jqd6\" (UniqueName: \"kubernetes.io/projected/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-kube-api-access-5jqd6\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.691855 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.691874 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-dns-svc\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.691889 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-config\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.693078 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-config\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.693405 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-dns-svc\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.695572 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.696172 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 
10:56:08.713091 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jqd6\" (UniqueName: \"kubernetes.io/projected/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-kube-api-access-5jqd6\") pod \"dnsmasq-dns-8554648995-pzkcn\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.739604 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.749214 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.750614 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-lqtjd"] Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.760736 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-lqtjd"] Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.794845 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e7894a2-70db-4b4b-8a34-f5630794ef18-config\") pod \"2e7894a2-70db-4b4b-8a34-f5630794ef18\" (UID: \"2e7894a2-70db-4b4b-8a34-f5630794ef18\") " Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.795062 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fl2dd\" (UniqueName: \"kubernetes.io/projected/2e7894a2-70db-4b4b-8a34-f5630794ef18-kube-api-access-fl2dd\") pod \"2e7894a2-70db-4b4b-8a34-f5630794ef18\" (UID: \"2e7894a2-70db-4b4b-8a34-f5630794ef18\") " Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.795120 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e7894a2-70db-4b4b-8a34-f5630794ef18-dns-svc\") pod \"2e7894a2-70db-4b4b-8a34-f5630794ef18\" (UID: \"2e7894a2-70db-4b4b-8a34-f5630794ef18\") " Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.795406 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e7894a2-70db-4b4b-8a34-f5630794ef18-config" (OuterVolumeSpecName: "config") pod "2e7894a2-70db-4b4b-8a34-f5630794ef18" (UID: "2e7894a2-70db-4b4b-8a34-f5630794ef18"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.795837 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e7894a2-70db-4b4b-8a34-f5630794ef18-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.796815 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e7894a2-70db-4b4b-8a34-f5630794ef18-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2e7894a2-70db-4b4b-8a34-f5630794ef18" (UID: "2e7894a2-70db-4b4b-8a34-f5630794ef18"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.801996 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e7894a2-70db-4b4b-8a34-f5630794ef18-kube-api-access-fl2dd" (OuterVolumeSpecName: "kube-api-access-fl2dd") pod "2e7894a2-70db-4b4b-8a34-f5630794ef18" (UID: "2e7894a2-70db-4b4b-8a34-f5630794ef18"). 
InnerVolumeSpecName "kube-api-access-fl2dd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.876422 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.897507 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e7894a2-70db-4b4b-8a34-f5630794ef18-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.897545 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fl2dd\" (UniqueName: \"kubernetes.io/projected/2e7894a2-70db-4b4b-8a34-f5630794ef18-kube-api-access-fl2dd\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.945236 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-58mnt"] Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.959142 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-48rkq"] Nov 22 10:56:08 crc kubenswrapper[4938]: W1122 10:56:08.966102 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod79aa7573_3064_49a8_8d09_bcb97db52e9d.slice/crio-128ebde745b1da0ea9c3b3d32d835e9c5dfae7d0885eac4ce45e444ef4bde7d5 WatchSource:0}: Error finding container 128ebde745b1da0ea9c3b3d32d835e9c5dfae7d0885eac4ce45e444ef4bde7d5: Status 404 returned error can't find the container with id 128ebde745b1da0ea9c3b3d32d835e9c5dfae7d0885eac4ce45e444ef4bde7d5 Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.977260 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.980738 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.982346 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.982864 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.983001 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-mgvrx" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.983199 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 22 10:56:08 crc kubenswrapper[4938]: W1122 10:56:08.983392 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3fc26cc_0b37_4f3b_a4ed_fd5817d30df7.slice/crio-2f5a573dfdeaf1a5543ed337ee106f2cfdcd0eb08e7a3359bfc860f748d23c4a WatchSource:0}: Error finding container 2f5a573dfdeaf1a5543ed337ee106f2cfdcd0eb08e7a3359bfc860f748d23c4a: Status 404 returned error can't find the container with id 2f5a573dfdeaf1a5543ed337ee106f2cfdcd0eb08e7a3359bfc860f748d23c4a Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.998403 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2z2f\" (UniqueName: \"kubernetes.io/projected/05abbe70-d68c-4b0b-a6a3-580b764f3014-kube-api-access-n2z2f\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.998437 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05abbe70-d68c-4b0b-a6a3-580b764f3014-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.998460 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/05abbe70-d68c-4b0b-a6a3-580b764f3014-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.998509 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/05abbe70-d68c-4b0b-a6a3-580b764f3014-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.998605 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/05abbe70-d68c-4b0b-a6a3-580b764f3014-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.998628 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05abbe70-d68c-4b0b-a6a3-580b764f3014-config\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 
10:56:08 crc kubenswrapper[4938]: I1122 10:56:08.998662 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/05abbe70-d68c-4b0b-a6a3-580b764f3014-scripts\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.013653 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.099721 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/05abbe70-d68c-4b0b-a6a3-580b764f3014-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.100038 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/05abbe70-d68c-4b0b-a6a3-580b764f3014-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.100069 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05abbe70-d68c-4b0b-a6a3-580b764f3014-config\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.100117 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/05abbe70-d68c-4b0b-a6a3-580b764f3014-scripts\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.100167 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2z2f\" (UniqueName: \"kubernetes.io/projected/05abbe70-d68c-4b0b-a6a3-580b764f3014-kube-api-access-n2z2f\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.100198 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05abbe70-d68c-4b0b-a6a3-580b764f3014-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.100222 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/05abbe70-d68c-4b0b-a6a3-580b764f3014-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.101426 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/05abbe70-d68c-4b0b-a6a3-580b764f3014-scripts\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.101499 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/05abbe70-d68c-4b0b-a6a3-580b764f3014-config\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.101780 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/05abbe70-d68c-4b0b-a6a3-580b764f3014-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.108950 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05abbe70-d68c-4b0b-a6a3-580b764f3014-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.109130 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/05abbe70-d68c-4b0b-a6a3-580b764f3014-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.114820 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/05abbe70-d68c-4b0b-a6a3-580b764f3014-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.118145 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2z2f\" (UniqueName: \"kubernetes.io/projected/05abbe70-d68c-4b0b-a6a3-580b764f3014-kube-api-access-n2z2f\") pod \"ovn-northd-0\" (UID: \"05abbe70-d68c-4b0b-a6a3-580b764f3014\") " pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.307764 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.365685 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pzkcn"] Nov 22 10:56:09 crc kubenswrapper[4938]: W1122 10:56:09.375154 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ec93715_8ee1_4fa9_aae3_cd1fcc983154.slice/crio-b21bfc78e0cb14efc99d270b56e01946f564d68face2e9e6d8839798fdc85b66 WatchSource:0}: Error finding container b21bfc78e0cb14efc99d270b56e01946f564d68face2e9e6d8839798fdc85b66: Status 404 returned error can't find the container with id b21bfc78e0cb14efc99d270b56e01946f564d68face2e9e6d8839798fdc85b66 Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.697650 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-48rkq" event={"ID":"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7","Type":"ContainerStarted","Data":"cecf2be05e6b191342302d75e1e5c5537f0678b3061cfcded8f79061737760e6"} Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.698013 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-48rkq" event={"ID":"a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7","Type":"ContainerStarted","Data":"2f5a573dfdeaf1a5543ed337ee106f2cfdcd0eb08e7a3359bfc860f748d23c4a"} Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.699349 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" event={"ID":"79aa7573-3064-49a8-8d09-bcb97db52e9d","Type":"ContainerStarted","Data":"128ebde745b1da0ea9c3b3d32d835e9c5dfae7d0885eac4ce45e444ef4bde7d5"} Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.700431 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-kn262" event={"ID":"2e7894a2-70db-4b4b-8a34-f5630794ef18","Type":"ContainerDied","Data":"7329d362b1c94d6a07045a9d9d92a1f854a2be06b1113a7750d3b7ca1ff30057"} Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.700450 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-kn262" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.701739 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pzkcn" event={"ID":"2ec93715-8ee1-4fa9-aae3-cd1fcc983154","Type":"ContainerStarted","Data":"b21bfc78e0cb14efc99d270b56e01946f564d68face2e9e6d8839798fdc85b66"} Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.720311 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-48rkq" podStartSLOduration=2.720289765 podStartE2EDuration="2.720289765s" podCreationTimestamp="2025-11-22 10:56:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:09.714611361 +0000 UTC m=+1102.182448770" watchObservedRunningTime="2025-11-22 10:56:09.720289765 +0000 UTC m=+1102.188127164" Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.734024 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.766044 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kn262"] Nov 22 10:56:09 crc kubenswrapper[4938]: I1122 10:56:09.774705 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kn262"] Nov 22 10:56:10 crc kubenswrapper[4938]: I1122 10:56:10.459132 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e7894a2-70db-4b4b-8a34-f5630794ef18" path="/var/lib/kubelet/pods/2e7894a2-70db-4b4b-8a34-f5630794ef18/volumes" Nov 22 10:56:10 crc kubenswrapper[4938]: I1122 10:56:10.459744 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="523d2f5d-1b2a-4b67-85ac-838c0435ecb2" path="/var/lib/kubelet/pods/523d2f5d-1b2a-4b67-85ac-838c0435ecb2/volumes" Nov 22 10:56:10 crc kubenswrapper[4938]: I1122 10:56:10.716640 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"05abbe70-d68c-4b0b-a6a3-580b764f3014","Type":"ContainerStarted","Data":"119f7a51b8db932f0e4e24643047d6ecfd56b41b9f67cd15d4d8bc59218de53e"} Nov 22 10:56:11 crc kubenswrapper[4938]: I1122 10:56:11.300433 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:56:11 crc kubenswrapper[4938]: I1122 10:56:11.300825 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:56:11 crc kubenswrapper[4938]: I1122 10:56:11.723497 4938 generic.go:334] "Generic (PLEG): container finished" podID="79aa7573-3064-49a8-8d09-bcb97db52e9d" containerID="aebc55a9c2528c08f6da64aac4d04d8ee59d006300069cc0524a6b3485d80a30" exitCode=0 Nov 22 10:56:11 crc kubenswrapper[4938]: I1122 10:56:11.723552 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" event={"ID":"79aa7573-3064-49a8-8d09-bcb97db52e9d","Type":"ContainerDied","Data":"aebc55a9c2528c08f6da64aac4d04d8ee59d006300069cc0524a6b3485d80a30"} Nov 22 
10:56:11 crc kubenswrapper[4938]: I1122 10:56:11.726115 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"05abbe70-d68c-4b0b-a6a3-580b764f3014","Type":"ContainerStarted","Data":"00e08603534ceae08ed58ce7e596a7aa12c452ee68e7f6e9de0ea467b4441ab5"} Nov 22 10:56:11 crc kubenswrapper[4938]: I1122 10:56:11.726164 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"05abbe70-d68c-4b0b-a6a3-580b764f3014","Type":"ContainerStarted","Data":"5cb1a3bedc10f8b41a2117ff536dc7c4a0e5adcef36c6b7751ee2119398a5519"} Nov 22 10:56:11 crc kubenswrapper[4938]: I1122 10:56:11.726242 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 22 10:56:11 crc kubenswrapper[4938]: I1122 10:56:11.728077 4938 generic.go:334] "Generic (PLEG): container finished" podID="2ec93715-8ee1-4fa9-aae3-cd1fcc983154" containerID="0848bb1e06033757b2cd45b7e3358a62926f89ca37db015907bcfbad26b9c33a" exitCode=0 Nov 22 10:56:11 crc kubenswrapper[4938]: I1122 10:56:11.728110 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pzkcn" event={"ID":"2ec93715-8ee1-4fa9-aae3-cd1fcc983154","Type":"ContainerDied","Data":"0848bb1e06033757b2cd45b7e3358a62926f89ca37db015907bcfbad26b9c33a"} Nov 22 10:56:11 crc kubenswrapper[4938]: I1122 10:56:11.757675 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.22663626 podStartE2EDuration="3.757651756s" podCreationTimestamp="2025-11-22 10:56:08 +0000 UTC" firstStartedPulling="2025-11-22 10:56:09.743009289 +0000 UTC m=+1102.210846688" lastFinishedPulling="2025-11-22 10:56:11.274024785 +0000 UTC m=+1103.741862184" observedRunningTime="2025-11-22 10:56:11.756030605 +0000 UTC m=+1104.223868014" watchObservedRunningTime="2025-11-22 10:56:11.757651756 +0000 UTC m=+1104.225489155" Nov 22 10:56:12 crc kubenswrapper[4938]: I1122 10:56:12.736539 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pzkcn" event={"ID":"2ec93715-8ee1-4fa9-aae3-cd1fcc983154","Type":"ContainerStarted","Data":"d98eca2b077a7ea106b1ba2a59453e5af94409602992331fad8b1b03ee25ab14"} Nov 22 10:56:12 crc kubenswrapper[4938]: I1122 10:56:12.737241 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:12 crc kubenswrapper[4938]: I1122 10:56:12.738528 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" event={"ID":"79aa7573-3064-49a8-8d09-bcb97db52e9d","Type":"ContainerStarted","Data":"c6e35e51748e7a4bb171c5d24594009f0f72744c81de847ebf7213a3fa796681"} Nov 22 10:56:12 crc kubenswrapper[4938]: I1122 10:56:12.754642 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-pzkcn" podStartSLOduration=3.482907765 podStartE2EDuration="4.754628328s" podCreationTimestamp="2025-11-22 10:56:08 +0000 UTC" firstStartedPulling="2025-11-22 10:56:09.379436223 +0000 UTC m=+1101.847273622" lastFinishedPulling="2025-11-22 10:56:10.651156786 +0000 UTC m=+1103.118994185" observedRunningTime="2025-11-22 10:56:12.752165956 +0000 UTC m=+1105.220003385" watchObservedRunningTime="2025-11-22 10:56:12.754628328 +0000 UTC m=+1105.222465727" Nov 22 10:56:12 crc kubenswrapper[4938]: I1122 10:56:12.771278 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" 
podStartSLOduration=4.155321937 podStartE2EDuration="5.771257238s" podCreationTimestamp="2025-11-22 10:56:07 +0000 UTC" firstStartedPulling="2025-11-22 10:56:08.971491065 +0000 UTC m=+1101.439328464" lastFinishedPulling="2025-11-22 10:56:10.587426366 +0000 UTC m=+1103.055263765" observedRunningTime="2025-11-22 10:56:12.767826591 +0000 UTC m=+1105.235664000" watchObservedRunningTime="2025-11-22 10:56:12.771257238 +0000 UTC m=+1105.239094647" Nov 22 10:56:12 crc kubenswrapper[4938]: I1122 10:56:12.889775 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 22 10:56:12 crc kubenswrapper[4938]: I1122 10:56:12.931771 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 22 10:56:13 crc kubenswrapper[4938]: I1122 10:56:13.336826 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:14 crc kubenswrapper[4938]: I1122 10:56:14.754693 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"7e219ab3-870c-4d69-99b6-79758b76a271","Type":"ContainerStarted","Data":"f642356d0cc676f503eed840e722ad080ab9fd37d42d28ae5134e77ca878979e"} Nov 22 10:56:14 crc kubenswrapper[4938]: I1122 10:56:14.755459 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 22 10:56:14 crc kubenswrapper[4938]: I1122 10:56:14.775233 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.086532672 podStartE2EDuration="46.775217534s" podCreationTimestamp="2025-11-22 10:55:28 +0000 UTC" firstStartedPulling="2025-11-22 10:55:29.016034032 +0000 UTC m=+1061.483871441" lastFinishedPulling="2025-11-22 10:56:13.704718894 +0000 UTC m=+1106.172556303" observedRunningTime="2025-11-22 10:56:14.770837343 +0000 UTC m=+1107.238674732" watchObservedRunningTime="2025-11-22 10:56:14.775217534 +0000 UTC m=+1107.243054933" Nov 22 10:56:17 crc kubenswrapper[4938]: I1122 10:56:17.288536 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 22 10:56:17 crc kubenswrapper[4938]: I1122 10:56:17.339019 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="0404ed3a-da0f-4ba3-953b-e1f3dca9d53b" containerName="galera" probeResult="failure" output=< Nov 22 10:56:17 crc kubenswrapper[4938]: wsrep_local_state_comment (Joined) differs from Synced Nov 22 10:56:17 crc kubenswrapper[4938]: > Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.087870 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-r44fd"] Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.090111 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-r44fd" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.092243 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-r44fd"] Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.189669 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.249544 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jr5gv\" (UniqueName: \"kubernetes.io/projected/82b8dcb0-b9c8-4773-a64f-e6d6e89df241-kube-api-access-jr5gv\") pod \"keystone-db-create-r44fd\" (UID: \"82b8dcb0-b9c8-4773-a64f-e6d6e89df241\") " pod="openstack/keystone-db-create-r44fd" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.337105 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.351761 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jr5gv\" (UniqueName: \"kubernetes.io/projected/82b8dcb0-b9c8-4773-a64f-e6d6e89df241-kube-api-access-jr5gv\") pod \"keystone-db-create-r44fd\" (UID: \"82b8dcb0-b9c8-4773-a64f-e6d6e89df241\") " pod="openstack/keystone-db-create-r44fd" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.373829 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jr5gv\" (UniqueName: \"kubernetes.io/projected/82b8dcb0-b9c8-4773-a64f-e6d6e89df241-kube-api-access-jr5gv\") pod \"keystone-db-create-r44fd\" (UID: \"82b8dcb0-b9c8-4773-a64f-e6d6e89df241\") " pod="openstack/keystone-db-create-r44fd" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.444852 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.471610 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-r44fd" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.490542 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-zw4nk"] Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.491677 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-zw4nk" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.510834 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-zw4nk"] Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.555593 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbmxn\" (UniqueName: \"kubernetes.io/projected/67463515-1e01-40aa-b47b-e15d1dc63ef8-kube-api-access-hbmxn\") pod \"placement-db-create-zw4nk\" (UID: \"67463515-1e01-40aa-b47b-e15d1dc63ef8\") " pod="openstack/placement-db-create-zw4nk" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.657469 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbmxn\" (UniqueName: \"kubernetes.io/projected/67463515-1e01-40aa-b47b-e15d1dc63ef8-kube-api-access-hbmxn\") pod \"placement-db-create-zw4nk\" (UID: \"67463515-1e01-40aa-b47b-e15d1dc63ef8\") " pod="openstack/placement-db-create-zw4nk" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.682590 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbmxn\" (UniqueName: \"kubernetes.io/projected/67463515-1e01-40aa-b47b-e15d1dc63ef8-kube-api-access-hbmxn\") pod \"placement-db-create-zw4nk\" (UID: \"67463515-1e01-40aa-b47b-e15d1dc63ef8\") " pod="openstack/placement-db-create-zw4nk" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.716462 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-w48hh"] Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.717888 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-w48hh" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.725691 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-w48hh"] Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.791324 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"15e6e3f1-ae9e-4a70-8342-74d6554ec24c","Type":"ContainerStarted","Data":"1e006da9203224e6cd4b9b07506bbd01e2737b7e51070df4e965e9ade9faf8e2"} Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.791681 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.810256 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.828631841 podStartE2EDuration="48.810235752s" podCreationTimestamp="2025-11-22 10:55:30 +0000 UTC" firstStartedPulling="2025-11-22 10:55:31.0664003 +0000 UTC m=+1063.534237699" lastFinishedPulling="2025-11-22 10:56:18.048004211 +0000 UTC m=+1110.515841610" observedRunningTime="2025-11-22 10:56:18.806054936 +0000 UTC m=+1111.273892335" watchObservedRunningTime="2025-11-22 10:56:18.810235752 +0000 UTC m=+1111.278073151" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.862452 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tt78\" (UniqueName: \"kubernetes.io/projected/8e880927-eabf-4d36-ae6d-40aa89780b1b-kube-api-access-8tt78\") pod \"glance-db-create-w48hh\" (UID: \"8e880927-eabf-4d36-ae6d-40aa89780b1b\") " pod="openstack/glance-db-create-w48hh" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.878106 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.898820 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-zw4nk" Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.950836 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-58mnt"] Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.951225 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" podUID="79aa7573-3064-49a8-8d09-bcb97db52e9d" containerName="dnsmasq-dns" containerID="cri-o://c6e35e51748e7a4bb171c5d24594009f0f72744c81de847ebf7213a3fa796681" gracePeriod=10 Nov 22 10:56:18 crc kubenswrapper[4938]: I1122 10:56:18.964476 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tt78\" (UniqueName: \"kubernetes.io/projected/8e880927-eabf-4d36-ae6d-40aa89780b1b-kube-api-access-8tt78\") pod \"glance-db-create-w48hh\" (UID: \"8e880927-eabf-4d36-ae6d-40aa89780b1b\") " pod="openstack/glance-db-create-w48hh" Nov 22 10:56:19 crc kubenswrapper[4938]: I1122 10:56:19.005757 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-r44fd"] Nov 22 10:56:19 crc kubenswrapper[4938]: I1122 10:56:19.005802 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tt78\" (UniqueName: \"kubernetes.io/projected/8e880927-eabf-4d36-ae6d-40aa89780b1b-kube-api-access-8tt78\") pod \"glance-db-create-w48hh\" (UID: \"8e880927-eabf-4d36-ae6d-40aa89780b1b\") " pod="openstack/glance-db-create-w48hh" Nov 22 10:56:19 crc kubenswrapper[4938]: I1122 10:56:19.050528 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-w48hh" Nov 22 10:56:19 crc kubenswrapper[4938]: I1122 10:56:19.546631 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-zw4nk"] Nov 22 10:56:19 crc kubenswrapper[4938]: I1122 10:56:19.684613 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-w48hh"] Nov 22 10:56:19 crc kubenswrapper[4938]: I1122 10:56:19.801102 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-r44fd" event={"ID":"82b8dcb0-b9c8-4773-a64f-e6d6e89df241","Type":"ContainerStarted","Data":"a9afe0852c244f1a9e61616de98b8ce5f0bbd91626d13eccd5d9e782b1e8a69e"} Nov 22 10:56:19 crc kubenswrapper[4938]: I1122 10:56:19.801436 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-r44fd" event={"ID":"82b8dcb0-b9c8-4773-a64f-e6d6e89df241","Type":"ContainerStarted","Data":"de687cbd7af967415d6a5c16639d1d056c438a904b9a7acdb368f4e10eeb22b7"} Nov 22 10:56:19 crc kubenswrapper[4938]: I1122 10:56:19.809016 4938 generic.go:334] "Generic (PLEG): container finished" podID="79aa7573-3064-49a8-8d09-bcb97db52e9d" containerID="c6e35e51748e7a4bb171c5d24594009f0f72744c81de847ebf7213a3fa796681" exitCode=0 Nov 22 10:56:19 crc kubenswrapper[4938]: I1122 10:56:19.809107 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" event={"ID":"79aa7573-3064-49a8-8d09-bcb97db52e9d","Type":"ContainerDied","Data":"c6e35e51748e7a4bb171c5d24594009f0f72744c81de847ebf7213a3fa796681"} Nov 22 10:56:19 crc kubenswrapper[4938]: I1122 10:56:19.810577 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-w48hh" event={"ID":"8e880927-eabf-4d36-ae6d-40aa89780b1b","Type":"ContainerStarted","Data":"0e488651e5888dde7bcd2711c659498f358d2317b2bd7fbb7a2dfd0dc47b39be"} Nov 22 10:56:19 crc kubenswrapper[4938]: I1122 10:56:19.812098 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zw4nk" event={"ID":"67463515-1e01-40aa-b47b-e15d1dc63ef8","Type":"ContainerStarted","Data":"dc0865147cc4e1b3618449ea3cb27fbf290db487943f2370dfcec6ebddb039bf"} Nov 22 10:56:19 crc kubenswrapper[4938]: I1122 10:56:19.817446 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-r44fd" podStartSLOduration=1.817427972 podStartE2EDuration="1.817427972s" podCreationTimestamp="2025-11-22 10:56:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:19.816400146 +0000 UTC m=+1112.284237545" watchObservedRunningTime="2025-11-22 10:56:19.817427972 +0000 UTC m=+1112.285265371" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.098043 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.190474 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8dcj\" (UniqueName: \"kubernetes.io/projected/79aa7573-3064-49a8-8d09-bcb97db52e9d-kube-api-access-l8dcj\") pod \"79aa7573-3064-49a8-8d09-bcb97db52e9d\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.190611 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-dns-svc\") pod \"79aa7573-3064-49a8-8d09-bcb97db52e9d\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.190680 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-ovsdbserver-sb\") pod \"79aa7573-3064-49a8-8d09-bcb97db52e9d\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.190711 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-config\") pod \"79aa7573-3064-49a8-8d09-bcb97db52e9d\" (UID: \"79aa7573-3064-49a8-8d09-bcb97db52e9d\") " Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.195765 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79aa7573-3064-49a8-8d09-bcb97db52e9d-kube-api-access-l8dcj" (OuterVolumeSpecName: "kube-api-access-l8dcj") pod "79aa7573-3064-49a8-8d09-bcb97db52e9d" (UID: "79aa7573-3064-49a8-8d09-bcb97db52e9d"). InnerVolumeSpecName "kube-api-access-l8dcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.227500 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-config" (OuterVolumeSpecName: "config") pod "79aa7573-3064-49a8-8d09-bcb97db52e9d" (UID: "79aa7573-3064-49a8-8d09-bcb97db52e9d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.231809 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "79aa7573-3064-49a8-8d09-bcb97db52e9d" (UID: "79aa7573-3064-49a8-8d09-bcb97db52e9d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.240226 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "79aa7573-3064-49a8-8d09-bcb97db52e9d" (UID: "79aa7573-3064-49a8-8d09-bcb97db52e9d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.293259 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.293295 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.293307 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79aa7573-3064-49a8-8d09-bcb97db52e9d-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.293315 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8dcj\" (UniqueName: \"kubernetes.io/projected/79aa7573-3064-49a8-8d09-bcb97db52e9d-kube-api-access-l8dcj\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.484075 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rsr4t"] Nov 22 10:56:20 crc kubenswrapper[4938]: E1122 10:56:20.484632 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79aa7573-3064-49a8-8d09-bcb97db52e9d" containerName="init" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.484648 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="79aa7573-3064-49a8-8d09-bcb97db52e9d" containerName="init" Nov 22 10:56:20 crc kubenswrapper[4938]: E1122 10:56:20.484662 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79aa7573-3064-49a8-8d09-bcb97db52e9d" containerName="dnsmasq-dns" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.484668 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="79aa7573-3064-49a8-8d09-bcb97db52e9d" containerName="dnsmasq-dns" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.484832 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="79aa7573-3064-49a8-8d09-bcb97db52e9d" containerName="dnsmasq-dns" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.490597 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.524838 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rsr4t"] Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.608195 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7x7t5\" (UniqueName: \"kubernetes.io/projected/878d8a17-a88c-43ae-930e-01a652f87d2b-kube-api-access-7x7t5\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.608400 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.608516 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.609038 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.609059 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-config\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.710646 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-config\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.710731 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7x7t5\" (UniqueName: \"kubernetes.io/projected/878d8a17-a88c-43ae-930e-01a652f87d2b-kube-api-access-7x7t5\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.710815 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.710861 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" 
(UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.710897 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.711880 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.712081 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.712237 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.712758 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-config\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.731737 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7x7t5\" (UniqueName: \"kubernetes.io/projected/878d8a17-a88c-43ae-930e-01a652f87d2b-kube-api-access-7x7t5\") pod \"dnsmasq-dns-b8fbc5445-rsr4t\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.819183 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-w48hh" event={"ID":"8e880927-eabf-4d36-ae6d-40aa89780b1b","Type":"ContainerStarted","Data":"f411d9ecb9ff3d4af45cd9a9ff9ab36212f4ea144391afa122c76d81c0158f38"} Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.822245 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zw4nk" event={"ID":"67463515-1e01-40aa-b47b-e15d1dc63ef8","Type":"ContainerStarted","Data":"89d3eaf62d690ce5ec14a79c6c8bc1632b827cd50359c60dc463ece747bc0e72"} Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.822594 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.829021 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.829493 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-58mnt" event={"ID":"79aa7573-3064-49a8-8d09-bcb97db52e9d","Type":"ContainerDied","Data":"128ebde745b1da0ea9c3b3d32d835e9c5dfae7d0885eac4ce45e444ef4bde7d5"} Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.829529 4938 scope.go:117] "RemoveContainer" containerID="c6e35e51748e7a4bb171c5d24594009f0f72744c81de847ebf7213a3fa796681" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.838414 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-w48hh" podStartSLOduration=2.838399529 podStartE2EDuration="2.838399529s" podCreationTimestamp="2025-11-22 10:56:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:20.836115882 +0000 UTC m=+1113.303953281" watchObservedRunningTime="2025-11-22 10:56:20.838399529 +0000 UTC m=+1113.306236928" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.852249 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-58mnt"] Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.858058 4938 scope.go:117] "RemoveContainer" containerID="aebc55a9c2528c08f6da64aac4d04d8ee59d006300069cc0524a6b3485d80a30" Nov 22 10:56:20 crc kubenswrapper[4938]: I1122 10:56:20.859483 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-58mnt"] Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.226542 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rsr4t"] Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.615927 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.623172 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.625858 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.626058 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-nvs4g" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.626874 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.628759 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.641322 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.728342 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7a0f58e0-5202-4792-bd1a-64966c18450f-lock\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.728443 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.728472 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.728663 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/7a0f58e0-5202-4792-bd1a-64966c18450f-cache\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.728848 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqjlt\" (UniqueName: \"kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-kube-api-access-kqjlt\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.829995 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.830046 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.830096 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/7a0f58e0-5202-4792-bd1a-64966c18450f-cache\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.830179 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqjlt\" (UniqueName: \"kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-kube-api-access-kqjlt\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: E1122 10:56:21.830185 4938 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 10:56:21 crc kubenswrapper[4938]: E1122 10:56:21.830207 4938 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.830224 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7a0f58e0-5202-4792-bd1a-64966c18450f-lock\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: E1122 10:56:21.830253 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift podName:7a0f58e0-5202-4792-bd1a-64966c18450f nodeName:}" failed. No retries permitted until 2025-11-22 10:56:22.330236681 +0000 UTC m=+1114.798074080 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift") pod "swift-storage-0" (UID: "7a0f58e0-5202-4792-bd1a-64966c18450f") : configmap "swift-ring-files" not found Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.830492 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.830731 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/7a0f58e0-5202-4792-bd1a-64966c18450f-cache\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.830781 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7a0f58e0-5202-4792-bd1a-64966c18450f-lock\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.835995 4938 generic.go:334] "Generic (PLEG): container finished" podID="8e880927-eabf-4d36-ae6d-40aa89780b1b" containerID="f411d9ecb9ff3d4af45cd9a9ff9ab36212f4ea144391afa122c76d81c0158f38" exitCode=0 Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.836032 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-w48hh" 
event={"ID":"8e880927-eabf-4d36-ae6d-40aa89780b1b","Type":"ContainerDied","Data":"f411d9ecb9ff3d4af45cd9a9ff9ab36212f4ea144391afa122c76d81c0158f38"} Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.837367 4938 generic.go:334] "Generic (PLEG): container finished" podID="67463515-1e01-40aa-b47b-e15d1dc63ef8" containerID="89d3eaf62d690ce5ec14a79c6c8bc1632b827cd50359c60dc463ece747bc0e72" exitCode=0 Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.837403 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zw4nk" event={"ID":"67463515-1e01-40aa-b47b-e15d1dc63ef8","Type":"ContainerDied","Data":"89d3eaf62d690ce5ec14a79c6c8bc1632b827cd50359c60dc463ece747bc0e72"} Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.839224 4938 generic.go:334] "Generic (PLEG): container finished" podID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerID="ddaf5f77568ede8f0097270dc83639f3c5c260b3818c6185c44296a3fea73865" exitCode=0 Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.839298 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" event={"ID":"878d8a17-a88c-43ae-930e-01a652f87d2b","Type":"ContainerDied","Data":"ddaf5f77568ede8f0097270dc83639f3c5c260b3818c6185c44296a3fea73865"} Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.839333 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" event={"ID":"878d8a17-a88c-43ae-930e-01a652f87d2b","Type":"ContainerStarted","Data":"3a4db7547dfdf5540bc4a08d07cdb766bc786a50c4bda3cd37eb52ce393ba630"} Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.840752 4938 generic.go:334] "Generic (PLEG): container finished" podID="82b8dcb0-b9c8-4773-a64f-e6d6e89df241" containerID="a9afe0852c244f1a9e61616de98b8ce5f0bbd91626d13eccd5d9e782b1e8a69e" exitCode=0 Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.840799 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-r44fd" event={"ID":"82b8dcb0-b9c8-4773-a64f-e6d6e89df241","Type":"ContainerDied","Data":"a9afe0852c244f1a9e61616de98b8ce5f0bbd91626d13eccd5d9e782b1e8a69e"} Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.857337 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqjlt\" (UniqueName: \"kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-kube-api-access-kqjlt\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:21 crc kubenswrapper[4938]: I1122 10:56:21.870260 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.098820 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-lpx7v"] Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.101559 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.104830 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.105789 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.106125 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.117904 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-lpx7v"] Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.160295 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-xn65g"] Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.161487 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.175099 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-lpx7v"] Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.187304 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-xn65g"] Nov 22 10:56:22 crc kubenswrapper[4938]: E1122 10:56:22.209858 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-mh6ll ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/swift-ring-rebalance-lpx7v" podUID="86c2e50c-fd7c-42a3-9c94-07aa0751d391" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.251222 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48baac61-428d-4d1d-aa99-39c8ca12e251-scripts\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.251286 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-swiftconf\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.251319 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/86c2e50c-fd7c-42a3-9c94-07aa0751d391-ring-data-devices\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.251458 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-combined-ca-bundle\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.251517 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-swiftconf\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.251584 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/86c2e50c-fd7c-42a3-9c94-07aa0751d391-etc-swift\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.251948 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/48baac61-428d-4d1d-aa99-39c8ca12e251-ring-data-devices\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.252160 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-dispersionconf\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.252283 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-combined-ca-bundle\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.252345 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5f2z\" (UniqueName: \"kubernetes.io/projected/48baac61-428d-4d1d-aa99-39c8ca12e251-kube-api-access-h5f2z\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.252382 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-dispersionconf\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.252429 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh6ll\" (UniqueName: \"kubernetes.io/projected/86c2e50c-fd7c-42a3-9c94-07aa0751d391-kube-api-access-mh6ll\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.252588 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86c2e50c-fd7c-42a3-9c94-07aa0751d391-scripts\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.252644 
4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/48baac61-428d-4d1d-aa99-39c8ca12e251-etc-swift\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354142 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86c2e50c-fd7c-42a3-9c94-07aa0751d391-scripts\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354196 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/48baac61-428d-4d1d-aa99-39c8ca12e251-etc-swift\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354221 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354249 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48baac61-428d-4d1d-aa99-39c8ca12e251-scripts\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354266 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-swiftconf\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354286 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/86c2e50c-fd7c-42a3-9c94-07aa0751d391-ring-data-devices\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354307 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-combined-ca-bundle\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354325 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-swiftconf\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354353 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/empty-dir/86c2e50c-fd7c-42a3-9c94-07aa0751d391-etc-swift\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354369 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/48baac61-428d-4d1d-aa99-39c8ca12e251-ring-data-devices\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354402 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-dispersionconf\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354464 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-combined-ca-bundle\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354486 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5f2z\" (UniqueName: \"kubernetes.io/projected/48baac61-428d-4d1d-aa99-39c8ca12e251-kube-api-access-h5f2z\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354504 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-dispersionconf\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.354519 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh6ll\" (UniqueName: \"kubernetes.io/projected/86c2e50c-fd7c-42a3-9c94-07aa0751d391-kube-api-access-mh6ll\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.355524 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/86c2e50c-fd7c-42a3-9c94-07aa0751d391-etc-swift\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: E1122 10:56:22.355652 4938 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 10:56:22 crc kubenswrapper[4938]: E1122 10:56:22.355673 4938 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 10:56:22 crc kubenswrapper[4938]: E1122 10:56:22.355716 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift podName:7a0f58e0-5202-4792-bd1a-64966c18450f 
nodeName:}" failed. No retries permitted until 2025-11-22 10:56:23.355696139 +0000 UTC m=+1115.823533538 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift") pod "swift-storage-0" (UID: "7a0f58e0-5202-4792-bd1a-64966c18450f") : configmap "swift-ring-files" not found Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.356333 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/48baac61-428d-4d1d-aa99-39c8ca12e251-etc-swift\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.356967 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48baac61-428d-4d1d-aa99-39c8ca12e251-scripts\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.357074 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/48baac61-428d-4d1d-aa99-39c8ca12e251-ring-data-devices\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.360183 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-swiftconf\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.360187 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-swiftconf\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.360269 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86c2e50c-fd7c-42a3-9c94-07aa0751d391-scripts\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.360404 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-dispersionconf\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.360838 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-combined-ca-bundle\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.362818 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: 
\"kubernetes.io/configmap/86c2e50c-fd7c-42a3-9c94-07aa0751d391-ring-data-devices\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.367729 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-combined-ca-bundle\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.369546 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-dispersionconf\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.374751 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5f2z\" (UniqueName: \"kubernetes.io/projected/48baac61-428d-4d1d-aa99-39c8ca12e251-kube-api-access-h5f2z\") pod \"swift-ring-rebalance-xn65g\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.378079 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh6ll\" (UniqueName: \"kubernetes.io/projected/86c2e50c-fd7c-42a3-9c94-07aa0751d391-kube-api-access-mh6ll\") pod \"swift-ring-rebalance-lpx7v\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.460413 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79aa7573-3064-49a8-8d09-bcb97db52e9d" path="/var/lib/kubelet/pods/79aa7573-3064-49a8-8d09-bcb97db52e9d/volumes" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.515844 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.854068 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.854115 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" event={"ID":"878d8a17-a88c-43ae-930e-01a652f87d2b","Type":"ContainerStarted","Data":"a6c5504d4e82ebc60278076f0055b48c5966ef6e35797f69681db5d528f8674d"} Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.868976 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.899751 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" podStartSLOduration=2.8997245449999998 podStartE2EDuration="2.899724545s" podCreationTimestamp="2025-11-22 10:56:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:22.886711317 +0000 UTC m=+1115.354548736" watchObservedRunningTime="2025-11-22 10:56:22.899724545 +0000 UTC m=+1115.367561954" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.951986 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-xn65g"] Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.976276 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-swiftconf\") pod \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.976352 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-dispersionconf\") pod \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.976396 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mh6ll\" (UniqueName: \"kubernetes.io/projected/86c2e50c-fd7c-42a3-9c94-07aa0751d391-kube-api-access-mh6ll\") pod \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.976464 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/86c2e50c-fd7c-42a3-9c94-07aa0751d391-etc-swift\") pod \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.976565 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/86c2e50c-fd7c-42a3-9c94-07aa0751d391-ring-data-devices\") pod \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.976601 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86c2e50c-fd7c-42a3-9c94-07aa0751d391-scripts\") pod \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.976627 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-combined-ca-bundle\") pod \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\" (UID: \"86c2e50c-fd7c-42a3-9c94-07aa0751d391\") " Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.977184 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86c2e50c-fd7c-42a3-9c94-07aa0751d391-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod 
"86c2e50c-fd7c-42a3-9c94-07aa0751d391" (UID: "86c2e50c-fd7c-42a3-9c94-07aa0751d391"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.977247 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86c2e50c-fd7c-42a3-9c94-07aa0751d391-scripts" (OuterVolumeSpecName: "scripts") pod "86c2e50c-fd7c-42a3-9c94-07aa0751d391" (UID: "86c2e50c-fd7c-42a3-9c94-07aa0751d391"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.977634 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86c2e50c-fd7c-42a3-9c94-07aa0751d391-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "86c2e50c-fd7c-42a3-9c94-07aa0751d391" (UID: "86c2e50c-fd7c-42a3-9c94-07aa0751d391"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.984167 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "86c2e50c-fd7c-42a3-9c94-07aa0751d391" (UID: "86c2e50c-fd7c-42a3-9c94-07aa0751d391"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.985322 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86c2e50c-fd7c-42a3-9c94-07aa0751d391-kube-api-access-mh6ll" (OuterVolumeSpecName: "kube-api-access-mh6ll") pod "86c2e50c-fd7c-42a3-9c94-07aa0751d391" (UID: "86c2e50c-fd7c-42a3-9c94-07aa0751d391"). InnerVolumeSpecName "kube-api-access-mh6ll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.986311 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "86c2e50c-fd7c-42a3-9c94-07aa0751d391" (UID: "86c2e50c-fd7c-42a3-9c94-07aa0751d391"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:56:22 crc kubenswrapper[4938]: I1122 10:56:22.986413 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "86c2e50c-fd7c-42a3-9c94-07aa0751d391" (UID: "86c2e50c-fd7c-42a3-9c94-07aa0751d391"). InnerVolumeSpecName "dispersionconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.079173 4938 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.079207 4938 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.079222 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mh6ll\" (UniqueName: \"kubernetes.io/projected/86c2e50c-fd7c-42a3-9c94-07aa0751d391-kube-api-access-mh6ll\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.079234 4938 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/86c2e50c-fd7c-42a3-9c94-07aa0751d391-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.079253 4938 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/86c2e50c-fd7c-42a3-9c94-07aa0751d391-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.079266 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86c2e50c-fd7c-42a3-9c94-07aa0751d391-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.079284 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86c2e50c-fd7c-42a3-9c94-07aa0751d391-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.080702 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-zw4nk" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.180616 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbmxn\" (UniqueName: \"kubernetes.io/projected/67463515-1e01-40aa-b47b-e15d1dc63ef8-kube-api-access-hbmxn\") pod \"67463515-1e01-40aa-b47b-e15d1dc63ef8\" (UID: \"67463515-1e01-40aa-b47b-e15d1dc63ef8\") " Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.202270 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67463515-1e01-40aa-b47b-e15d1dc63ef8-kube-api-access-hbmxn" (OuterVolumeSpecName: "kube-api-access-hbmxn") pod "67463515-1e01-40aa-b47b-e15d1dc63ef8" (UID: "67463515-1e01-40aa-b47b-e15d1dc63ef8"). InnerVolumeSpecName "kube-api-access-hbmxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.282855 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbmxn\" (UniqueName: \"kubernetes.io/projected/67463515-1e01-40aa-b47b-e15d1dc63ef8-kube-api-access-hbmxn\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.306455 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-w48hh" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.312860 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-r44fd" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.384660 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:23 crc kubenswrapper[4938]: E1122 10:56:23.384947 4938 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 10:56:23 crc kubenswrapper[4938]: E1122 10:56:23.384971 4938 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 10:56:23 crc kubenswrapper[4938]: E1122 10:56:23.385013 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift podName:7a0f58e0-5202-4792-bd1a-64966c18450f nodeName:}" failed. No retries permitted until 2025-11-22 10:56:25.384999397 +0000 UTC m=+1117.852836796 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift") pod "swift-storage-0" (UID: "7a0f58e0-5202-4792-bd1a-64966c18450f") : configmap "swift-ring-files" not found Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.485619 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tt78\" (UniqueName: \"kubernetes.io/projected/8e880927-eabf-4d36-ae6d-40aa89780b1b-kube-api-access-8tt78\") pod \"8e880927-eabf-4d36-ae6d-40aa89780b1b\" (UID: \"8e880927-eabf-4d36-ae6d-40aa89780b1b\") " Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.485776 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jr5gv\" (UniqueName: \"kubernetes.io/projected/82b8dcb0-b9c8-4773-a64f-e6d6e89df241-kube-api-access-jr5gv\") pod \"82b8dcb0-b9c8-4773-a64f-e6d6e89df241\" (UID: \"82b8dcb0-b9c8-4773-a64f-e6d6e89df241\") " Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.488869 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82b8dcb0-b9c8-4773-a64f-e6d6e89df241-kube-api-access-jr5gv" (OuterVolumeSpecName: "kube-api-access-jr5gv") pod "82b8dcb0-b9c8-4773-a64f-e6d6e89df241" (UID: "82b8dcb0-b9c8-4773-a64f-e6d6e89df241"). InnerVolumeSpecName "kube-api-access-jr5gv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.493158 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e880927-eabf-4d36-ae6d-40aa89780b1b-kube-api-access-8tt78" (OuterVolumeSpecName: "kube-api-access-8tt78") pod "8e880927-eabf-4d36-ae6d-40aa89780b1b" (UID: "8e880927-eabf-4d36-ae6d-40aa89780b1b"). InnerVolumeSpecName "kube-api-access-8tt78". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.588049 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tt78\" (UniqueName: \"kubernetes.io/projected/8e880927-eabf-4d36-ae6d-40aa89780b1b-kube-api-access-8tt78\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.588085 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jr5gv\" (UniqueName: \"kubernetes.io/projected/82b8dcb0-b9c8-4773-a64f-e6d6e89df241-kube-api-access-jr5gv\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.879644 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-zw4nk" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.879665 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zw4nk" event={"ID":"67463515-1e01-40aa-b47b-e15d1dc63ef8","Type":"ContainerDied","Data":"dc0865147cc4e1b3618449ea3cb27fbf290db487943f2370dfcec6ebddb039bf"} Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.879704 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc0865147cc4e1b3618449ea3cb27fbf290db487943f2370dfcec6ebddb039bf" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.881835 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-r44fd" event={"ID":"82b8dcb0-b9c8-4773-a64f-e6d6e89df241","Type":"ContainerDied","Data":"de687cbd7af967415d6a5c16639d1d056c438a904b9a7acdb368f4e10eeb22b7"} Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.881874 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de687cbd7af967415d6a5c16639d1d056c438a904b9a7acdb368f4e10eeb22b7" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.881968 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-r44fd" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.883351 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xn65g" event={"ID":"48baac61-428d-4d1d-aa99-39c8ca12e251","Type":"ContainerStarted","Data":"9afe9419ebd6ba31e69cbaf0345f88742cf2c0ba0d961db81f1f2fca383cd413"} Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.885061 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-w48hh" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.885005 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-w48hh" event={"ID":"8e880927-eabf-4d36-ae6d-40aa89780b1b","Type":"ContainerDied","Data":"0e488651e5888dde7bcd2711c659498f358d2317b2bd7fbb7a2dfd0dc47b39be"} Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.885102 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e488651e5888dde7bcd2711c659498f358d2317b2bd7fbb7a2dfd0dc47b39be" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.885070 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lpx7v" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.885214 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.949612 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-lpx7v"] Nov 22 10:56:23 crc kubenswrapper[4938]: I1122 10:56:23.962518 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-lpx7v"] Nov 22 10:56:24 crc kubenswrapper[4938]: I1122 10:56:24.407916 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 22 10:56:24 crc kubenswrapper[4938]: I1122 10:56:24.458263 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86c2e50c-fd7c-42a3-9c94-07aa0751d391" path="/var/lib/kubelet/pods/86c2e50c-fd7c-42a3-9c94-07aa0751d391/volumes" Nov 22 10:56:25 crc kubenswrapper[4938]: I1122 10:56:25.417638 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:25 crc kubenswrapper[4938]: E1122 10:56:25.417837 4938 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 10:56:25 crc kubenswrapper[4938]: E1122 10:56:25.417867 4938 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 10:56:25 crc kubenswrapper[4938]: E1122 10:56:25.417952 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift podName:7a0f58e0-5202-4792-bd1a-64966c18450f nodeName:}" failed. No retries permitted until 2025-11-22 10:56:29.417915125 +0000 UTC m=+1121.885752524 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift") pod "swift-storage-0" (UID: "7a0f58e0-5202-4792-bd1a-64966c18450f") : configmap "swift-ring-files" not found Nov 22 10:56:28 crc kubenswrapper[4938]: I1122 10:56:28.922082 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xn65g" event={"ID":"48baac61-428d-4d1d-aa99-39c8ca12e251","Type":"ContainerStarted","Data":"1e994bdb3c375559febba01bd2c0152ebcf6d610d52ca906e19b18c56af1566e"} Nov 22 10:56:28 crc kubenswrapper[4938]: I1122 10:56:28.944651 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-xn65g" podStartSLOduration=1.912498817 podStartE2EDuration="6.944632619s" podCreationTimestamp="2025-11-22 10:56:22 +0000 UTC" firstStartedPulling="2025-11-22 10:56:22.982695702 +0000 UTC m=+1115.450533101" lastFinishedPulling="2025-11-22 10:56:28.014829504 +0000 UTC m=+1120.482666903" observedRunningTime="2025-11-22 10:56:28.937990731 +0000 UTC m=+1121.405828140" watchObservedRunningTime="2025-11-22 10:56:28.944632619 +0000 UTC m=+1121.412470018" Nov 22 10:56:29 crc kubenswrapper[4938]: I1122 10:56:29.480698 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:29 crc kubenswrapper[4938]: E1122 10:56:29.480881 4938 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 10:56:29 crc kubenswrapper[4938]: E1122 10:56:29.480957 4938 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 10:56:29 crc kubenswrapper[4938]: E1122 10:56:29.481004 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift podName:7a0f58e0-5202-4792-bd1a-64966c18450f nodeName:}" failed. No retries permitted until 2025-11-22 10:56:37.480990212 +0000 UTC m=+1129.948827611 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift") pod "swift-storage-0" (UID: "7a0f58e0-5202-4792-bd1a-64966c18450f") : configmap "swift-ring-files" not found Nov 22 10:56:30 crc kubenswrapper[4938]: I1122 10:56:30.435261 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 22 10:56:30 crc kubenswrapper[4938]: I1122 10:56:30.824118 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:56:30 crc kubenswrapper[4938]: I1122 10:56:30.879764 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pzkcn"] Nov 22 10:56:30 crc kubenswrapper[4938]: I1122 10:56:30.882260 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-pzkcn" podUID="2ec93715-8ee1-4fa9-aae3-cd1fcc983154" containerName="dnsmasq-dns" containerID="cri-o://d98eca2b077a7ea106b1ba2a59453e5af94409602992331fad8b1b03ee25ab14" gracePeriod=10 Nov 22 10:56:31 crc kubenswrapper[4938]: I1122 10:56:31.962477 4938 generic.go:334] "Generic (PLEG): container finished" podID="c85fce90-13b6-40ad-a1ec-f0bf5168038e" containerID="ba096eb02091d8c3a9903e2e267ae7540fdc7f45825b897f8f3fd5cde5794d36" exitCode=0 Nov 22 10:56:31 crc kubenswrapper[4938]: I1122 10:56:31.962574 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c85fce90-13b6-40ad-a1ec-f0bf5168038e","Type":"ContainerDied","Data":"ba096eb02091d8c3a9903e2e267ae7540fdc7f45825b897f8f3fd5cde5794d36"} Nov 22 10:56:31 crc kubenswrapper[4938]: I1122 10:56:31.965582 4938 generic.go:334] "Generic (PLEG): container finished" podID="2ec93715-8ee1-4fa9-aae3-cd1fcc983154" containerID="d98eca2b077a7ea106b1ba2a59453e5af94409602992331fad8b1b03ee25ab14" exitCode=0 Nov 22 10:56:31 crc kubenswrapper[4938]: I1122 10:56:31.965661 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pzkcn" event={"ID":"2ec93715-8ee1-4fa9-aae3-cd1fcc983154","Type":"ContainerDied","Data":"d98eca2b077a7ea106b1ba2a59453e5af94409602992331fad8b1b03ee25ab14"} Nov 22 10:56:31 crc kubenswrapper[4938]: I1122 10:56:31.971316 4938 generic.go:334] "Generic (PLEG): container finished" podID="18ebf838-be34-4ba1-b8f0-031a5477ca78" containerID="a458a922a4d183ce4cbc3967e62865f46c22904f37a18ec220c88cbfbbec3652" exitCode=0 Nov 22 10:56:31 crc kubenswrapper[4938]: I1122 10:56:31.971359 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"18ebf838-be34-4ba1-b8f0-031a5477ca78","Type":"ContainerDied","Data":"a458a922a4d183ce4cbc3967e62865f46c22904f37a18ec220c88cbfbbec3652"} Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.218431 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.326593 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-dns-svc\") pod \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.326915 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-ovsdbserver-nb\") pod \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.327103 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-config\") pod \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.327212 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-ovsdbserver-sb\") pod \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.327307 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jqd6\" (UniqueName: \"kubernetes.io/projected/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-kube-api-access-5jqd6\") pod \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\" (UID: \"2ec93715-8ee1-4fa9-aae3-cd1fcc983154\") " Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.331562 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-kube-api-access-5jqd6" (OuterVolumeSpecName: "kube-api-access-5jqd6") pod "2ec93715-8ee1-4fa9-aae3-cd1fcc983154" (UID: "2ec93715-8ee1-4fa9-aae3-cd1fcc983154"). InnerVolumeSpecName "kube-api-access-5jqd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.369145 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2ec93715-8ee1-4fa9-aae3-cd1fcc983154" (UID: "2ec93715-8ee1-4fa9-aae3-cd1fcc983154"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.370278 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2ec93715-8ee1-4fa9-aae3-cd1fcc983154" (UID: "2ec93715-8ee1-4fa9-aae3-cd1fcc983154"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.372316 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-config" (OuterVolumeSpecName: "config") pod "2ec93715-8ee1-4fa9-aae3-cd1fcc983154" (UID: "2ec93715-8ee1-4fa9-aae3-cd1fcc983154"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.380620 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2ec93715-8ee1-4fa9-aae3-cd1fcc983154" (UID: "2ec93715-8ee1-4fa9-aae3-cd1fcc983154"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.430003 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.430052 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.430063 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.430074 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.430085 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jqd6\" (UniqueName: \"kubernetes.io/projected/2ec93715-8ee1-4fa9-aae3-cd1fcc983154-kube-api-access-5jqd6\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.980163 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pzkcn" event={"ID":"2ec93715-8ee1-4fa9-aae3-cd1fcc983154","Type":"ContainerDied","Data":"b21bfc78e0cb14efc99d270b56e01946f564d68face2e9e6d8839798fdc85b66"} Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.980200 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-pzkcn" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.980505 4938 scope.go:117] "RemoveContainer" containerID="d98eca2b077a7ea106b1ba2a59453e5af94409602992331fad8b1b03ee25ab14" Nov 22 10:56:32 crc kubenswrapper[4938]: I1122 10:56:32.982314 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c85fce90-13b6-40ad-a1ec-f0bf5168038e","Type":"ContainerStarted","Data":"17c6db99431b037bf08e4a0e783e917142cb0caf2d7ba06872fc681dbe0e72c4"} Nov 22 10:56:33 crc kubenswrapper[4938]: I1122 10:56:33.000806 4938 scope.go:117] "RemoveContainer" containerID="0848bb1e06033757b2cd45b7e3358a62926f89ca37db015907bcfbad26b9c33a" Nov 22 10:56:33 crc kubenswrapper[4938]: I1122 10:56:33.009749 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pzkcn"] Nov 22 10:56:33 crc kubenswrapper[4938]: I1122 10:56:33.019630 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pzkcn"] Nov 22 10:56:33 crc kubenswrapper[4938]: I1122 10:56:33.990151 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"18ebf838-be34-4ba1-b8f0-031a5477ca78","Type":"ContainerStarted","Data":"4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040"} Nov 22 10:56:33 crc kubenswrapper[4938]: I1122 10:56:33.990551 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:56:33 crc kubenswrapper[4938]: I1122 10:56:33.991346 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 22 10:56:34 crc kubenswrapper[4938]: I1122 10:56:34.017292 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371965.837513 podStartE2EDuration="1m11.017263026s" podCreationTimestamp="2025-11-22 10:55:23 +0000 UTC" firstStartedPulling="2025-11-22 10:55:25.80955803 +0000 UTC m=+1058.277395429" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:34.014687631 +0000 UTC m=+1126.482525030" watchObservedRunningTime="2025-11-22 10:56:34.017263026 +0000 UTC m=+1126.485100425" Nov 22 10:56:34 crc kubenswrapper[4938]: I1122 10:56:34.040480 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=39.102065366 podStartE2EDuration="1m11.040462622s" podCreationTimestamp="2025-11-22 10:55:23 +0000 UTC" firstStartedPulling="2025-11-22 10:55:25.536599093 +0000 UTC m=+1058.004436492" lastFinishedPulling="2025-11-22 10:55:57.474996349 +0000 UTC m=+1089.942833748" observedRunningTime="2025-11-22 10:56:34.037074386 +0000 UTC m=+1126.504911805" watchObservedRunningTime="2025-11-22 10:56:34.040462622 +0000 UTC m=+1126.508300021" Nov 22 10:56:34 crc kubenswrapper[4938]: I1122 10:56:34.456977 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ec93715-8ee1-4fa9-aae3-cd1fcc983154" path="/var/lib/kubelet/pods/2ec93715-8ee1-4fa9-aae3-cd1fcc983154/volumes" Nov 22 10:56:34 crc kubenswrapper[4938]: I1122 10:56:34.848464 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-sq62w" podUID="a2ffb7f9-f83c-4e71-af53-3d116e260d8e" containerName="ovn-controller" probeResult="failure" output=< Nov 22 10:56:34 crc kubenswrapper[4938]: ERROR - ovn-controller connection status is 'not connected', 
expecting 'connected' status Nov 22 10:56:34 crc kubenswrapper[4938]: > Nov 22 10:56:34 crc kubenswrapper[4938]: I1122 10:56:34.932460 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:56:34 crc kubenswrapper[4938]: I1122 10:56:34.935812 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-dg79z" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.175866 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-sq62w-config-hnx4d"] Nov 22 10:56:35 crc kubenswrapper[4938]: E1122 10:56:35.176672 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ec93715-8ee1-4fa9-aae3-cd1fcc983154" containerName="dnsmasq-dns" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.176690 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ec93715-8ee1-4fa9-aae3-cd1fcc983154" containerName="dnsmasq-dns" Nov 22 10:56:35 crc kubenswrapper[4938]: E1122 10:56:35.176715 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ec93715-8ee1-4fa9-aae3-cd1fcc983154" containerName="init" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.176724 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ec93715-8ee1-4fa9-aae3-cd1fcc983154" containerName="init" Nov 22 10:56:35 crc kubenswrapper[4938]: E1122 10:56:35.176738 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67463515-1e01-40aa-b47b-e15d1dc63ef8" containerName="mariadb-database-create" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.176745 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="67463515-1e01-40aa-b47b-e15d1dc63ef8" containerName="mariadb-database-create" Nov 22 10:56:35 crc kubenswrapper[4938]: E1122 10:56:35.176773 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82b8dcb0-b9c8-4773-a64f-e6d6e89df241" containerName="mariadb-database-create" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.176780 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="82b8dcb0-b9c8-4773-a64f-e6d6e89df241" containerName="mariadb-database-create" Nov 22 10:56:35 crc kubenswrapper[4938]: E1122 10:56:35.176799 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e880927-eabf-4d36-ae6d-40aa89780b1b" containerName="mariadb-database-create" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.176805 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e880927-eabf-4d36-ae6d-40aa89780b1b" containerName="mariadb-database-create" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.177098 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ec93715-8ee1-4fa9-aae3-cd1fcc983154" containerName="dnsmasq-dns" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.177168 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e880927-eabf-4d36-ae6d-40aa89780b1b" containerName="mariadb-database-create" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.177189 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="82b8dcb0-b9c8-4773-a64f-e6d6e89df241" containerName="mariadb-database-create" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.177208 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="67463515-1e01-40aa-b47b-e15d1dc63ef8" containerName="mariadb-database-create" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.178379 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.197387 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.200954 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-sq62w-config-hnx4d"] Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.274732 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d0a7146c-d916-4e6b-b432-8680fc5d585c-scripts\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.274811 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-run-ovn\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.274839 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q94v\" (UniqueName: \"kubernetes.io/projected/d0a7146c-d916-4e6b-b432-8680fc5d585c-kube-api-access-4q94v\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.274911 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-log-ovn\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.274954 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-run\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.275015 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d0a7146c-d916-4e6b-b432-8680fc5d585c-additional-scripts\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.376889 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-run-ovn\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.376954 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q94v\" (UniqueName: 
\"kubernetes.io/projected/d0a7146c-d916-4e6b-b432-8680fc5d585c-kube-api-access-4q94v\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.377012 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-log-ovn\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.377031 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-run\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.377074 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d0a7146c-d916-4e6b-b432-8680fc5d585c-additional-scripts\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.377124 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d0a7146c-d916-4e6b-b432-8680fc5d585c-scripts\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.377277 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-run-ovn\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.377362 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-run\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.377412 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-log-ovn\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.377993 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d0a7146c-d916-4e6b-b432-8680fc5d585c-additional-scripts\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.378943 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/d0a7146c-d916-4e6b-b432-8680fc5d585c-scripts\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.411794 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q94v\" (UniqueName: \"kubernetes.io/projected/d0a7146c-d916-4e6b-b432-8680fc5d585c-kube-api-access-4q94v\") pod \"ovn-controller-sq62w-config-hnx4d\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.500680 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:35 crc kubenswrapper[4938]: I1122 10:56:35.998441 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-sq62w-config-hnx4d"] Nov 22 10:56:36 crc kubenswrapper[4938]: W1122 10:56:36.001003 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0a7146c_d916_4e6b_b432_8680fc5d585c.slice/crio-6f53e80d791e7b6e267282e86335701791e6ab7006cd72a7d666a90290c59551 WatchSource:0}: Error finding container 6f53e80d791e7b6e267282e86335701791e6ab7006cd72a7d666a90290c59551: Status 404 returned error can't find the container with id 6f53e80d791e7b6e267282e86335701791e6ab7006cd72a7d666a90290c59551 Nov 22 10:56:36 crc kubenswrapper[4938]: I1122 10:56:36.010849 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sq62w-config-hnx4d" event={"ID":"d0a7146c-d916-4e6b-b432-8680fc5d585c","Type":"ContainerStarted","Data":"6f53e80d791e7b6e267282e86335701791e6ab7006cd72a7d666a90290c59551"} Nov 22 10:56:37 crc kubenswrapper[4938]: I1122 10:56:37.511485 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:37 crc kubenswrapper[4938]: E1122 10:56:37.511685 4938 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 10:56:37 crc kubenswrapper[4938]: E1122 10:56:37.511928 4938 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 10:56:37 crc kubenswrapper[4938]: E1122 10:56:37.511988 4938 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift podName:7a0f58e0-5202-4792-bd1a-64966c18450f nodeName:}" failed. No retries permitted until 2025-11-22 10:56:53.511968831 +0000 UTC m=+1145.979806230 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift") pod "swift-storage-0" (UID: "7a0f58e0-5202-4792-bd1a-64966c18450f") : configmap "swift-ring-files" not found Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.026345 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sq62w-config-hnx4d" event={"ID":"d0a7146c-d916-4e6b-b432-8680fc5d585c","Type":"ContainerStarted","Data":"cca482e3ff4f18deaae67014776c517027026d5147e33866a8e05dcc9c78246d"} Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.049234 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-sq62w-config-hnx4d" podStartSLOduration=3.049213106 podStartE2EDuration="3.049213106s" podCreationTimestamp="2025-11-22 10:56:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:38.044006565 +0000 UTC m=+1130.511843964" watchObservedRunningTime="2025-11-22 10:56:38.049213106 +0000 UTC m=+1130.517050505" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.169337 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5140-account-create-cbl8j"] Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.171198 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5140-account-create-cbl8j" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.174048 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.179991 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5140-account-create-cbl8j"] Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.222787 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvbb5\" (UniqueName: \"kubernetes.io/projected/075db7dd-b0f9-4b53-8a2e-02af60130f52-kube-api-access-cvbb5\") pod \"keystone-5140-account-create-cbl8j\" (UID: \"075db7dd-b0f9-4b53-8a2e-02af60130f52\") " pod="openstack/keystone-5140-account-create-cbl8j" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.324550 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvbb5\" (UniqueName: \"kubernetes.io/projected/075db7dd-b0f9-4b53-8a2e-02af60130f52-kube-api-access-cvbb5\") pod \"keystone-5140-account-create-cbl8j\" (UID: \"075db7dd-b0f9-4b53-8a2e-02af60130f52\") " pod="openstack/keystone-5140-account-create-cbl8j" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.341314 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvbb5\" (UniqueName: \"kubernetes.io/projected/075db7dd-b0f9-4b53-8a2e-02af60130f52-kube-api-access-cvbb5\") pod \"keystone-5140-account-create-cbl8j\" (UID: \"075db7dd-b0f9-4b53-8a2e-02af60130f52\") " pod="openstack/keystone-5140-account-create-cbl8j" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.494733 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5140-account-create-cbl8j" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.576895 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-80b6-account-create-qj8v6"] Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.578121 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-80b6-account-create-qj8v6" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.580025 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.589430 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-80b6-account-create-qj8v6"] Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.628391 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksxpb\" (UniqueName: \"kubernetes.io/projected/71c28968-eb30-4073-843c-65ddfd4a5073-kube-api-access-ksxpb\") pod \"placement-80b6-account-create-qj8v6\" (UID: \"71c28968-eb30-4073-843c-65ddfd4a5073\") " pod="openstack/placement-80b6-account-create-qj8v6" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.730962 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksxpb\" (UniqueName: \"kubernetes.io/projected/71c28968-eb30-4073-843c-65ddfd4a5073-kube-api-access-ksxpb\") pod \"placement-80b6-account-create-qj8v6\" (UID: \"71c28968-eb30-4073-843c-65ddfd4a5073\") " pod="openstack/placement-80b6-account-create-qj8v6" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.772796 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksxpb\" (UniqueName: \"kubernetes.io/projected/71c28968-eb30-4073-843c-65ddfd4a5073-kube-api-access-ksxpb\") pod \"placement-80b6-account-create-qj8v6\" (UID: \"71c28968-eb30-4073-843c-65ddfd4a5073\") " pod="openstack/placement-80b6-account-create-qj8v6" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.798027 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-18f1-account-create-wp2lb"] Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.799137 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-18f1-account-create-wp2lb" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.801150 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.804986 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-18f1-account-create-wp2lb"] Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.832270 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdwjd\" (UniqueName: \"kubernetes.io/projected/327fd7ac-029b-487f-a63c-28667c1ad2e2-kube-api-access-vdwjd\") pod \"glance-18f1-account-create-wp2lb\" (UID: \"327fd7ac-029b-487f-a63c-28667c1ad2e2\") " pod="openstack/glance-18f1-account-create-wp2lb" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.900495 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-80b6-account-create-qj8v6" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.934525 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdwjd\" (UniqueName: \"kubernetes.io/projected/327fd7ac-029b-487f-a63c-28667c1ad2e2-kube-api-access-vdwjd\") pod \"glance-18f1-account-create-wp2lb\" (UID: \"327fd7ac-029b-487f-a63c-28667c1ad2e2\") " pod="openstack/glance-18f1-account-create-wp2lb" Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.935593 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5140-account-create-cbl8j"] Nov 22 10:56:38 crc kubenswrapper[4938]: I1122 10:56:38.964553 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdwjd\" (UniqueName: \"kubernetes.io/projected/327fd7ac-029b-487f-a63c-28667c1ad2e2-kube-api-access-vdwjd\") pod \"glance-18f1-account-create-wp2lb\" (UID: \"327fd7ac-029b-487f-a63c-28667c1ad2e2\") " pod="openstack/glance-18f1-account-create-wp2lb" Nov 22 10:56:39 crc kubenswrapper[4938]: I1122 10:56:39.034486 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5140-account-create-cbl8j" event={"ID":"075db7dd-b0f9-4b53-8a2e-02af60130f52","Type":"ContainerStarted","Data":"a2d55525589bbac110896a99a7bbbefca15cd64fcd8024a7f75a95b5363f8d34"} Nov 22 10:56:39 crc kubenswrapper[4938]: I1122 10:56:39.036827 4938 generic.go:334] "Generic (PLEG): container finished" podID="d0a7146c-d916-4e6b-b432-8680fc5d585c" containerID="cca482e3ff4f18deaae67014776c517027026d5147e33866a8e05dcc9c78246d" exitCode=0 Nov 22 10:56:39 crc kubenswrapper[4938]: I1122 10:56:39.036867 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sq62w-config-hnx4d" event={"ID":"d0a7146c-d916-4e6b-b432-8680fc5d585c","Type":"ContainerDied","Data":"cca482e3ff4f18deaae67014776c517027026d5147e33866a8e05dcc9c78246d"} Nov 22 10:56:39 crc kubenswrapper[4938]: I1122 10:56:39.122479 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-18f1-account-create-wp2lb" Nov 22 10:56:39 crc kubenswrapper[4938]: I1122 10:56:39.343340 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-80b6-account-create-qj8v6"] Nov 22 10:56:39 crc kubenswrapper[4938]: W1122 10:56:39.355226 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71c28968_eb30_4073_843c_65ddfd4a5073.slice/crio-f62224109b40633e7563978f4576ae2e79871ea33019847d2b165f6bb2d665b0 WatchSource:0}: Error finding container f62224109b40633e7563978f4576ae2e79871ea33019847d2b165f6bb2d665b0: Status 404 returned error can't find the container with id f62224109b40633e7563978f4576ae2e79871ea33019847d2b165f6bb2d665b0 Nov 22 10:56:39 crc kubenswrapper[4938]: I1122 10:56:39.568135 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-18f1-account-create-wp2lb"] Nov 22 10:56:39 crc kubenswrapper[4938]: W1122 10:56:39.580186 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod327fd7ac_029b_487f_a63c_28667c1ad2e2.slice/crio-282c00645dd55f1dd30f0203a6bf87a0cf3ddaab8bcefb8f1e2508b06a8c2ae6 WatchSource:0}: Error finding container 282c00645dd55f1dd30f0203a6bf87a0cf3ddaab8bcefb8f1e2508b06a8c2ae6: Status 404 returned error can't find the container with id 282c00645dd55f1dd30f0203a6bf87a0cf3ddaab8bcefb8f1e2508b06a8c2ae6 Nov 22 10:56:39 crc kubenswrapper[4938]: I1122 10:56:39.847357 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-sq62w" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.047944 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-18f1-account-create-wp2lb" event={"ID":"327fd7ac-029b-487f-a63c-28667c1ad2e2","Type":"ContainerStarted","Data":"282c00645dd55f1dd30f0203a6bf87a0cf3ddaab8bcefb8f1e2508b06a8c2ae6"} Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.049527 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-80b6-account-create-qj8v6" event={"ID":"71c28968-eb30-4073-843c-65ddfd4a5073","Type":"ContainerStarted","Data":"f62224109b40633e7563978f4576ae2e79871ea33019847d2b165f6bb2d665b0"} Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.286141 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.354209 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-log-ovn\") pod \"d0a7146c-d916-4e6b-b432-8680fc5d585c\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.354264 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-run\") pod \"d0a7146c-d916-4e6b-b432-8680fc5d585c\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.354347 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d0a7146c-d916-4e6b-b432-8680fc5d585c-scripts\") pod \"d0a7146c-d916-4e6b-b432-8680fc5d585c\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.354354 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "d0a7146c-d916-4e6b-b432-8680fc5d585c" (UID: "d0a7146c-d916-4e6b-b432-8680fc5d585c"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.354391 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-run-ovn\") pod \"d0a7146c-d916-4e6b-b432-8680fc5d585c\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.354424 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-run" (OuterVolumeSpecName: "var-run") pod "d0a7146c-d916-4e6b-b432-8680fc5d585c" (UID: "d0a7146c-d916-4e6b-b432-8680fc5d585c"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.354445 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4q94v\" (UniqueName: \"kubernetes.io/projected/d0a7146c-d916-4e6b-b432-8680fc5d585c-kube-api-access-4q94v\") pod \"d0a7146c-d916-4e6b-b432-8680fc5d585c\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.354479 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "d0a7146c-d916-4e6b-b432-8680fc5d585c" (UID: "d0a7146c-d916-4e6b-b432-8680fc5d585c"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.354582 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d0a7146c-d916-4e6b-b432-8680fc5d585c-additional-scripts\") pod \"d0a7146c-d916-4e6b-b432-8680fc5d585c\" (UID: \"d0a7146c-d916-4e6b-b432-8680fc5d585c\") " Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.355289 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0a7146c-d916-4e6b-b432-8680fc5d585c-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "d0a7146c-d916-4e6b-b432-8680fc5d585c" (UID: "d0a7146c-d916-4e6b-b432-8680fc5d585c"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.355370 4938 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.355390 4938 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-run\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.355403 4938 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d0a7146c-d916-4e6b-b432-8680fc5d585c-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.355849 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0a7146c-d916-4e6b-b432-8680fc5d585c-scripts" (OuterVolumeSpecName: "scripts") pod "d0a7146c-d916-4e6b-b432-8680fc5d585c" (UID: "d0a7146c-d916-4e6b-b432-8680fc5d585c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.360891 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0a7146c-d916-4e6b-b432-8680fc5d585c-kube-api-access-4q94v" (OuterVolumeSpecName: "kube-api-access-4q94v") pod "d0a7146c-d916-4e6b-b432-8680fc5d585c" (UID: "d0a7146c-d916-4e6b-b432-8680fc5d585c"). InnerVolumeSpecName "kube-api-access-4q94v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.456583 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d0a7146c-d916-4e6b-b432-8680fc5d585c-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.456615 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4q94v\" (UniqueName: \"kubernetes.io/projected/d0a7146c-d916-4e6b-b432-8680fc5d585c-kube-api-access-4q94v\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:40 crc kubenswrapper[4938]: I1122 10:56:40.456626 4938 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d0a7146c-d916-4e6b-b432-8680fc5d585c-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:41 crc kubenswrapper[4938]: I1122 10:56:41.056930 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-sq62w-config-hnx4d" event={"ID":"d0a7146c-d916-4e6b-b432-8680fc5d585c","Type":"ContainerDied","Data":"6f53e80d791e7b6e267282e86335701791e6ab7006cd72a7d666a90290c59551"} Nov 22 10:56:41 crc kubenswrapper[4938]: I1122 10:56:41.056971 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f53e80d791e7b6e267282e86335701791e6ab7006cd72a7d666a90290c59551" Nov 22 10:56:41 crc kubenswrapper[4938]: I1122 10:56:41.056971 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-sq62w-config-hnx4d" Nov 22 10:56:41 crc kubenswrapper[4938]: I1122 10:56:41.142210 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-sq62w-config-hnx4d"] Nov 22 10:56:41 crc kubenswrapper[4938]: I1122 10:56:41.150435 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-sq62w-config-hnx4d"] Nov 22 10:56:41 crc kubenswrapper[4938]: I1122 10:56:41.300407 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:56:41 crc kubenswrapper[4938]: I1122 10:56:41.300795 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:56:42 crc kubenswrapper[4938]: I1122 10:56:42.064332 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-18f1-account-create-wp2lb" event={"ID":"327fd7ac-029b-487f-a63c-28667c1ad2e2","Type":"ContainerStarted","Data":"1ce96aa0a206ea67b41747b27cf69edb9cd5e5b3b31fba184220fda1c4889227"} Nov 22 10:56:42 crc kubenswrapper[4938]: I1122 10:56:42.066076 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-80b6-account-create-qj8v6" event={"ID":"71c28968-eb30-4073-843c-65ddfd4a5073","Type":"ContainerStarted","Data":"4d8635fd34904ad365126e4cbe9b13e7ec937b64204c1c5ccaffd8eea5521edd"} Nov 22 10:56:42 crc kubenswrapper[4938]: I1122 10:56:42.067744 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5140-account-create-cbl8j" 
event={"ID":"075db7dd-b0f9-4b53-8a2e-02af60130f52","Type":"ContainerStarted","Data":"087ec4c04f9e9f8ec2461fe58f8cc4971238e43c21bacccd8673b4cdbd80ae53"} Nov 22 10:56:42 crc kubenswrapper[4938]: I1122 10:56:42.457280 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0a7146c-d916-4e6b-b432-8680fc5d585c" path="/var/lib/kubelet/pods/d0a7146c-d916-4e6b-b432-8680fc5d585c/volumes" Nov 22 10:56:44 crc kubenswrapper[4938]: I1122 10:56:44.083896 4938 generic.go:334] "Generic (PLEG): container finished" podID="71c28968-eb30-4073-843c-65ddfd4a5073" containerID="4d8635fd34904ad365126e4cbe9b13e7ec937b64204c1c5ccaffd8eea5521edd" exitCode=0 Nov 22 10:56:44 crc kubenswrapper[4938]: I1122 10:56:44.083959 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-80b6-account-create-qj8v6" event={"ID":"71c28968-eb30-4073-843c-65ddfd4a5073","Type":"ContainerDied","Data":"4d8635fd34904ad365126e4cbe9b13e7ec937b64204c1c5ccaffd8eea5521edd"} Nov 22 10:56:44 crc kubenswrapper[4938]: I1122 10:56:44.085965 4938 generic.go:334] "Generic (PLEG): container finished" podID="48baac61-428d-4d1d-aa99-39c8ca12e251" containerID="1e994bdb3c375559febba01bd2c0152ebcf6d610d52ca906e19b18c56af1566e" exitCode=0 Nov 22 10:56:44 crc kubenswrapper[4938]: I1122 10:56:44.086061 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xn65g" event={"ID":"48baac61-428d-4d1d-aa99-39c8ca12e251","Type":"ContainerDied","Data":"1e994bdb3c375559febba01bd2c0152ebcf6d610d52ca906e19b18c56af1566e"} Nov 22 10:56:44 crc kubenswrapper[4938]: I1122 10:56:44.087519 4938 generic.go:334] "Generic (PLEG): container finished" podID="075db7dd-b0f9-4b53-8a2e-02af60130f52" containerID="087ec4c04f9e9f8ec2461fe58f8cc4971238e43c21bacccd8673b4cdbd80ae53" exitCode=0 Nov 22 10:56:44 crc kubenswrapper[4938]: I1122 10:56:44.087544 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5140-account-create-cbl8j" event={"ID":"075db7dd-b0f9-4b53-8a2e-02af60130f52","Type":"ContainerDied","Data":"087ec4c04f9e9f8ec2461fe58f8cc4971238e43c21bacccd8673b4cdbd80ae53"} Nov 22 10:56:44 crc kubenswrapper[4938]: I1122 10:56:44.088722 4938 generic.go:334] "Generic (PLEG): container finished" podID="327fd7ac-029b-487f-a63c-28667c1ad2e2" containerID="1ce96aa0a206ea67b41747b27cf69edb9cd5e5b3b31fba184220fda1c4889227" exitCode=0 Nov 22 10:56:44 crc kubenswrapper[4938]: I1122 10:56:44.088769 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-18f1-account-create-wp2lb" event={"ID":"327fd7ac-029b-487f-a63c-28667c1ad2e2","Type":"ContainerDied","Data":"1ce96aa0a206ea67b41747b27cf69edb9cd5e5b3b31fba184220fda1c4889227"} Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.030488 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="c85fce90-13b6-40ad-a1ec-f0bf5168038e" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.345592 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="18ebf838-be34-4ba1-b8f0-031a5477ca78" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.451141 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.570369 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5140-account-create-cbl8j" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.575829 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-18f1-account-create-wp2lb" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.586813 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-80b6-account-create-qj8v6" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.645095 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-dispersionconf\") pod \"48baac61-428d-4d1d-aa99-39c8ca12e251\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.645135 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvbb5\" (UniqueName: \"kubernetes.io/projected/075db7dd-b0f9-4b53-8a2e-02af60130f52-kube-api-access-cvbb5\") pod \"075db7dd-b0f9-4b53-8a2e-02af60130f52\" (UID: \"075db7dd-b0f9-4b53-8a2e-02af60130f52\") " Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.645173 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdwjd\" (UniqueName: \"kubernetes.io/projected/327fd7ac-029b-487f-a63c-28667c1ad2e2-kube-api-access-vdwjd\") pod \"327fd7ac-029b-487f-a63c-28667c1ad2e2\" (UID: \"327fd7ac-029b-487f-a63c-28667c1ad2e2\") " Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.645201 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/48baac61-428d-4d1d-aa99-39c8ca12e251-etc-swift\") pod \"48baac61-428d-4d1d-aa99-39c8ca12e251\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.645279 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-swiftconf\") pod \"48baac61-428d-4d1d-aa99-39c8ca12e251\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.645316 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-combined-ca-bundle\") pod \"48baac61-428d-4d1d-aa99-39c8ca12e251\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.645333 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ksxpb\" (UniqueName: \"kubernetes.io/projected/71c28968-eb30-4073-843c-65ddfd4a5073-kube-api-access-ksxpb\") pod \"71c28968-eb30-4073-843c-65ddfd4a5073\" (UID: \"71c28968-eb30-4073-843c-65ddfd4a5073\") " Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.645351 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/48baac61-428d-4d1d-aa99-39c8ca12e251-ring-data-devices\") pod \"48baac61-428d-4d1d-aa99-39c8ca12e251\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " Nov 22 10:56:45 crc kubenswrapper[4938]: 
I1122 10:56:45.645370 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5f2z\" (UniqueName: \"kubernetes.io/projected/48baac61-428d-4d1d-aa99-39c8ca12e251-kube-api-access-h5f2z\") pod \"48baac61-428d-4d1d-aa99-39c8ca12e251\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.645392 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48baac61-428d-4d1d-aa99-39c8ca12e251-scripts\") pod \"48baac61-428d-4d1d-aa99-39c8ca12e251\" (UID: \"48baac61-428d-4d1d-aa99-39c8ca12e251\") " Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.646829 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48baac61-428d-4d1d-aa99-39c8ca12e251-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "48baac61-428d-4d1d-aa99-39c8ca12e251" (UID: "48baac61-428d-4d1d-aa99-39c8ca12e251"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.647339 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48baac61-428d-4d1d-aa99-39c8ca12e251-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "48baac61-428d-4d1d-aa99-39c8ca12e251" (UID: "48baac61-428d-4d1d-aa99-39c8ca12e251"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.650434 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/327fd7ac-029b-487f-a63c-28667c1ad2e2-kube-api-access-vdwjd" (OuterVolumeSpecName: "kube-api-access-vdwjd") pod "327fd7ac-029b-487f-a63c-28667c1ad2e2" (UID: "327fd7ac-029b-487f-a63c-28667c1ad2e2"). InnerVolumeSpecName "kube-api-access-vdwjd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.650849 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/075db7dd-b0f9-4b53-8a2e-02af60130f52-kube-api-access-cvbb5" (OuterVolumeSpecName: "kube-api-access-cvbb5") pod "075db7dd-b0f9-4b53-8a2e-02af60130f52" (UID: "075db7dd-b0f9-4b53-8a2e-02af60130f52"). InnerVolumeSpecName "kube-api-access-cvbb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.652004 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71c28968-eb30-4073-843c-65ddfd4a5073-kube-api-access-ksxpb" (OuterVolumeSpecName: "kube-api-access-ksxpb") pod "71c28968-eb30-4073-843c-65ddfd4a5073" (UID: "71c28968-eb30-4073-843c-65ddfd4a5073"). InnerVolumeSpecName "kube-api-access-ksxpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.652700 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48baac61-428d-4d1d-aa99-39c8ca12e251-kube-api-access-h5f2z" (OuterVolumeSpecName: "kube-api-access-h5f2z") pod "48baac61-428d-4d1d-aa99-39c8ca12e251" (UID: "48baac61-428d-4d1d-aa99-39c8ca12e251"). InnerVolumeSpecName "kube-api-access-h5f2z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.653460 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "48baac61-428d-4d1d-aa99-39c8ca12e251" (UID: "48baac61-428d-4d1d-aa99-39c8ca12e251"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.668325 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "48baac61-428d-4d1d-aa99-39c8ca12e251" (UID: "48baac61-428d-4d1d-aa99-39c8ca12e251"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.668656 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48baac61-428d-4d1d-aa99-39c8ca12e251-scripts" (OuterVolumeSpecName: "scripts") pod "48baac61-428d-4d1d-aa99-39c8ca12e251" (UID: "48baac61-428d-4d1d-aa99-39c8ca12e251"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.671037 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "48baac61-428d-4d1d-aa99-39c8ca12e251" (UID: "48baac61-428d-4d1d-aa99-39c8ca12e251"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.746803 4938 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/48baac61-428d-4d1d-aa99-39c8ca12e251-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.746840 4938 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.746849 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.746860 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ksxpb\" (UniqueName: \"kubernetes.io/projected/71c28968-eb30-4073-843c-65ddfd4a5073-kube-api-access-ksxpb\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.746870 4938 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/48baac61-428d-4d1d-aa99-39c8ca12e251-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.746879 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5f2z\" (UniqueName: \"kubernetes.io/projected/48baac61-428d-4d1d-aa99-39c8ca12e251-kube-api-access-h5f2z\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.746888 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/48baac61-428d-4d1d-aa99-39c8ca12e251-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.746896 4938 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/48baac61-428d-4d1d-aa99-39c8ca12e251-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.746904 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvbb5\" (UniqueName: \"kubernetes.io/projected/075db7dd-b0f9-4b53-8a2e-02af60130f52-kube-api-access-cvbb5\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:45 crc kubenswrapper[4938]: I1122 10:56:45.746926 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdwjd\" (UniqueName: \"kubernetes.io/projected/327fd7ac-029b-487f-a63c-28667c1ad2e2-kube-api-access-vdwjd\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:46 crc kubenswrapper[4938]: I1122 10:56:46.103996 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5140-account-create-cbl8j" event={"ID":"075db7dd-b0f9-4b53-8a2e-02af60130f52","Type":"ContainerDied","Data":"a2d55525589bbac110896a99a7bbbefca15cd64fcd8024a7f75a95b5363f8d34"} Nov 22 10:56:46 crc kubenswrapper[4938]: I1122 10:56:46.104318 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2d55525589bbac110896a99a7bbbefca15cd64fcd8024a7f75a95b5363f8d34" Nov 22 10:56:46 crc kubenswrapper[4938]: I1122 10:56:46.104032 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5140-account-create-cbl8j" Nov 22 10:56:46 crc kubenswrapper[4938]: I1122 10:56:46.105665 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-18f1-account-create-wp2lb" event={"ID":"327fd7ac-029b-487f-a63c-28667c1ad2e2","Type":"ContainerDied","Data":"282c00645dd55f1dd30f0203a6bf87a0cf3ddaab8bcefb8f1e2508b06a8c2ae6"} Nov 22 10:56:46 crc kubenswrapper[4938]: I1122 10:56:46.105709 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="282c00645dd55f1dd30f0203a6bf87a0cf3ddaab8bcefb8f1e2508b06a8c2ae6" Nov 22 10:56:46 crc kubenswrapper[4938]: I1122 10:56:46.105672 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-18f1-account-create-wp2lb" Nov 22 10:56:46 crc kubenswrapper[4938]: I1122 10:56:46.108783 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-80b6-account-create-qj8v6" event={"ID":"71c28968-eb30-4073-843c-65ddfd4a5073","Type":"ContainerDied","Data":"f62224109b40633e7563978f4576ae2e79871ea33019847d2b165f6bb2d665b0"} Nov 22 10:56:46 crc kubenswrapper[4938]: I1122 10:56:46.108945 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f62224109b40633e7563978f4576ae2e79871ea33019847d2b165f6bb2d665b0" Nov 22 10:56:46 crc kubenswrapper[4938]: I1122 10:56:46.109025 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-80b6-account-create-qj8v6" Nov 22 10:56:46 crc kubenswrapper[4938]: I1122 10:56:46.113519 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xn65g" event={"ID":"48baac61-428d-4d1d-aa99-39c8ca12e251","Type":"ContainerDied","Data":"9afe9419ebd6ba31e69cbaf0345f88742cf2c0ba0d961db81f1f2fca383cd413"} Nov 22 10:56:46 crc kubenswrapper[4938]: I1122 10:56:46.113564 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9afe9419ebd6ba31e69cbaf0345f88742cf2c0ba0d961db81f1f2fca383cd413" Nov 22 10:56:46 crc kubenswrapper[4938]: I1122 10:56:46.113750 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-xn65g" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.021216 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-wn476"] Nov 22 10:56:49 crc kubenswrapper[4938]: E1122 10:56:49.022981 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="327fd7ac-029b-487f-a63c-28667c1ad2e2" containerName="mariadb-account-create" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.023054 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="327fd7ac-029b-487f-a63c-28667c1ad2e2" containerName="mariadb-account-create" Nov 22 10:56:49 crc kubenswrapper[4938]: E1122 10:56:49.023222 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a7146c-d916-4e6b-b432-8680fc5d585c" containerName="ovn-config" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.023285 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a7146c-d916-4e6b-b432-8680fc5d585c" containerName="ovn-config" Nov 22 10:56:49 crc kubenswrapper[4938]: E1122 10:56:49.023343 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71c28968-eb30-4073-843c-65ddfd4a5073" containerName="mariadb-account-create" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.023392 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="71c28968-eb30-4073-843c-65ddfd4a5073" containerName="mariadb-account-create" Nov 22 10:56:49 crc kubenswrapper[4938]: E1122 10:56:49.023448 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="075db7dd-b0f9-4b53-8a2e-02af60130f52" containerName="mariadb-account-create" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.023504 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="075db7dd-b0f9-4b53-8a2e-02af60130f52" containerName="mariadb-account-create" Nov 22 10:56:49 crc kubenswrapper[4938]: E1122 10:56:49.023564 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48baac61-428d-4d1d-aa99-39c8ca12e251" containerName="swift-ring-rebalance" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.023620 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="48baac61-428d-4d1d-aa99-39c8ca12e251" containerName="swift-ring-rebalance" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.023849 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="327fd7ac-029b-487f-a63c-28667c1ad2e2" containerName="mariadb-account-create" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.023938 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="71c28968-eb30-4073-843c-65ddfd4a5073" containerName="mariadb-account-create" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.024032 4938 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="075db7dd-b0f9-4b53-8a2e-02af60130f52" containerName="mariadb-account-create" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.024089 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="48baac61-428d-4d1d-aa99-39c8ca12e251" containerName="swift-ring-rebalance" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.024155 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0a7146c-d916-4e6b-b432-8680fc5d585c" containerName="ovn-config" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.024925 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.026342 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-wn476"] Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.027490 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.027734 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-c5zth" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.098106 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-db-sync-config-data\") pod \"glance-db-sync-wn476\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.098228 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7r2c7\" (UniqueName: \"kubernetes.io/projected/181febdf-4ec0-45f1-a062-f2f097504deb-kube-api-access-7r2c7\") pod \"glance-db-sync-wn476\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.098410 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-combined-ca-bundle\") pod \"glance-db-sync-wn476\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.098458 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-config-data\") pod \"glance-db-sync-wn476\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.199793 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-combined-ca-bundle\") pod \"glance-db-sync-wn476\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.199861 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-config-data\") pod \"glance-db-sync-wn476\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.199949 4938 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-db-sync-config-data\") pod \"glance-db-sync-wn476\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.200025 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7r2c7\" (UniqueName: \"kubernetes.io/projected/181febdf-4ec0-45f1-a062-f2f097504deb-kube-api-access-7r2c7\") pod \"glance-db-sync-wn476\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.210586 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-db-sync-config-data\") pod \"glance-db-sync-wn476\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.210778 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-combined-ca-bundle\") pod \"glance-db-sync-wn476\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.214066 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-config-data\") pod \"glance-db-sync-wn476\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.216444 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7r2c7\" (UniqueName: \"kubernetes.io/projected/181febdf-4ec0-45f1-a062-f2f097504deb-kube-api-access-7r2c7\") pod \"glance-db-sync-wn476\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.350938 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wn476" Nov 22 10:56:49 crc kubenswrapper[4938]: I1122 10:56:49.680979 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-wn476"] Nov 22 10:56:50 crc kubenswrapper[4938]: I1122 10:56:50.177718 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wn476" event={"ID":"181febdf-4ec0-45f1-a062-f2f097504deb","Type":"ContainerStarted","Data":"54e4e1ee0dece242144f628b04d704e57c9768a133a84b6d244725cd09764ed7"} Nov 22 10:56:53 crc kubenswrapper[4938]: I1122 10:56:53.570560 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:53 crc kubenswrapper[4938]: I1122 10:56:53.586247 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7a0f58e0-5202-4792-bd1a-64966c18450f-etc-swift\") pod \"swift-storage-0\" (UID: \"7a0f58e0-5202-4792-bd1a-64966c18450f\") " pod="openstack/swift-storage-0" Nov 22 10:56:53 crc kubenswrapper[4938]: I1122 10:56:53.740362 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.030141 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.343072 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.482568 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-5bmmm"] Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.484570 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-5bmmm" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.496250 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-5bmmm"] Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.582718 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-d64lt"] Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.586798 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-d64lt" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.604199 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbdrk\" (UniqueName: \"kubernetes.io/projected/577e27a5-3da6-4a00-9897-be6ef6a50c58-kube-api-access-bbdrk\") pod \"cinder-db-create-5bmmm\" (UID: \"577e27a5-3da6-4a00-9897-be6ef6a50c58\") " pod="openstack/cinder-db-create-5bmmm" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.649497 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-d64lt"] Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.692856 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-lr6cz"] Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.694140 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-lr6cz" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.701135 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-lr6cz"] Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.705770 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvdlp\" (UniqueName: \"kubernetes.io/projected/f645453d-fd00-45da-bc78-1f8bde75b6e3-kube-api-access-fvdlp\") pod \"barbican-db-create-d64lt\" (UID: \"f645453d-fd00-45da-bc78-1f8bde75b6e3\") " pod="openstack/barbican-db-create-d64lt" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.705832 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbdrk\" (UniqueName: \"kubernetes.io/projected/577e27a5-3da6-4a00-9897-be6ef6a50c58-kube-api-access-bbdrk\") pod \"cinder-db-create-5bmmm\" (UID: \"577e27a5-3da6-4a00-9897-be6ef6a50c58\") " pod="openstack/cinder-db-create-5bmmm" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.733989 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbdrk\" (UniqueName: \"kubernetes.io/projected/577e27a5-3da6-4a00-9897-be6ef6a50c58-kube-api-access-bbdrk\") pod \"cinder-db-create-5bmmm\" (UID: \"577e27a5-3da6-4a00-9897-be6ef6a50c58\") " pod="openstack/cinder-db-create-5bmmm" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.783546 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-x62vc"] Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.785241 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-x62vc" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.787394 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.788544 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.788845 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.788997 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-ct79k" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.791970 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-x62vc"] Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.806589 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-5bmmm" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.807147 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ht26k\" (UniqueName: \"kubernetes.io/projected/0f269f03-228e-42e2-b49f-106fcb2bc258-kube-api-access-ht26k\") pod \"neutron-db-create-lr6cz\" (UID: \"0f269f03-228e-42e2-b49f-106fcb2bc258\") " pod="openstack/neutron-db-create-lr6cz" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.807257 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvdlp\" (UniqueName: \"kubernetes.io/projected/f645453d-fd00-45da-bc78-1f8bde75b6e3-kube-api-access-fvdlp\") pod \"barbican-db-create-d64lt\" (UID: \"f645453d-fd00-45da-bc78-1f8bde75b6e3\") " pod="openstack/barbican-db-create-d64lt" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.841528 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvdlp\" (UniqueName: \"kubernetes.io/projected/f645453d-fd00-45da-bc78-1f8bde75b6e3-kube-api-access-fvdlp\") pod \"barbican-db-create-d64lt\" (UID: \"f645453d-fd00-45da-bc78-1f8bde75b6e3\") " pod="openstack/barbican-db-create-d64lt" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.904236 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-d64lt" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.909026 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ht26k\" (UniqueName: \"kubernetes.io/projected/0f269f03-228e-42e2-b49f-106fcb2bc258-kube-api-access-ht26k\") pod \"neutron-db-create-lr6cz\" (UID: \"0f269f03-228e-42e2-b49f-106fcb2bc258\") " pod="openstack/neutron-db-create-lr6cz" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.909168 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87059052-68f8-4e9c-9701-51a7f618e383-config-data\") pod \"keystone-db-sync-x62vc\" (UID: \"87059052-68f8-4e9c-9701-51a7f618e383\") " pod="openstack/keystone-db-sync-x62vc" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.909204 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqmp2\" (UniqueName: \"kubernetes.io/projected/87059052-68f8-4e9c-9701-51a7f618e383-kube-api-access-lqmp2\") pod \"keystone-db-sync-x62vc\" (UID: \"87059052-68f8-4e9c-9701-51a7f618e383\") " pod="openstack/keystone-db-sync-x62vc" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.909236 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87059052-68f8-4e9c-9701-51a7f618e383-combined-ca-bundle\") pod \"keystone-db-sync-x62vc\" (UID: \"87059052-68f8-4e9c-9701-51a7f618e383\") " pod="openstack/keystone-db-sync-x62vc" Nov 22 10:56:55 crc kubenswrapper[4938]: I1122 10:56:55.926413 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ht26k\" (UniqueName: \"kubernetes.io/projected/0f269f03-228e-42e2-b49f-106fcb2bc258-kube-api-access-ht26k\") pod \"neutron-db-create-lr6cz\" (UID: \"0f269f03-228e-42e2-b49f-106fcb2bc258\") " pod="openstack/neutron-db-create-lr6cz" Nov 22 10:56:56 crc kubenswrapper[4938]: I1122 10:56:56.011251 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87059052-68f8-4e9c-9701-51a7f618e383-config-data\") pod \"keystone-db-sync-x62vc\" (UID: \"87059052-68f8-4e9c-9701-51a7f618e383\") " pod="openstack/keystone-db-sync-x62vc" Nov 22 10:56:56 crc kubenswrapper[4938]: I1122 10:56:56.011311 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqmp2\" (UniqueName: \"kubernetes.io/projected/87059052-68f8-4e9c-9701-51a7f618e383-kube-api-access-lqmp2\") pod \"keystone-db-sync-x62vc\" (UID: \"87059052-68f8-4e9c-9701-51a7f618e383\") " pod="openstack/keystone-db-sync-x62vc" Nov 22 10:56:56 crc kubenswrapper[4938]: I1122 10:56:56.011350 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87059052-68f8-4e9c-9701-51a7f618e383-combined-ca-bundle\") pod \"keystone-db-sync-x62vc\" (UID: \"87059052-68f8-4e9c-9701-51a7f618e383\") " pod="openstack/keystone-db-sync-x62vc" Nov 22 10:56:56 crc kubenswrapper[4938]: I1122 10:56:56.015897 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-lr6cz" Nov 22 10:56:56 crc kubenswrapper[4938]: I1122 10:56:56.015899 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87059052-68f8-4e9c-9701-51a7f618e383-combined-ca-bundle\") pod \"keystone-db-sync-x62vc\" (UID: \"87059052-68f8-4e9c-9701-51a7f618e383\") " pod="openstack/keystone-db-sync-x62vc" Nov 22 10:56:56 crc kubenswrapper[4938]: I1122 10:56:56.017251 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87059052-68f8-4e9c-9701-51a7f618e383-config-data\") pod \"keystone-db-sync-x62vc\" (UID: \"87059052-68f8-4e9c-9701-51a7f618e383\") " pod="openstack/keystone-db-sync-x62vc" Nov 22 10:56:56 crc kubenswrapper[4938]: I1122 10:56:56.036424 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqmp2\" (UniqueName: \"kubernetes.io/projected/87059052-68f8-4e9c-9701-51a7f618e383-kube-api-access-lqmp2\") pod \"keystone-db-sync-x62vc\" (UID: \"87059052-68f8-4e9c-9701-51a7f618e383\") " pod="openstack/keystone-db-sync-x62vc" Nov 22 10:56:56 crc kubenswrapper[4938]: I1122 10:56:56.102505 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-x62vc" Nov 22 10:56:58 crc kubenswrapper[4938]: E1122 10:56:58.229783 4938 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.182:48698->38.102.83.182:45775: write tcp 38.102.83.182:48698->38.102.83.182:45775: write: broken pipe Nov 22 10:57:02 crc kubenswrapper[4938]: I1122 10:57:02.822574 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-d64lt"] Nov 22 10:57:02 crc kubenswrapper[4938]: I1122 10:57:02.831534 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-5bmmm"] Nov 22 10:57:02 crc kubenswrapper[4938]: W1122 10:57:02.837641 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod577e27a5_3da6_4a00_9897_be6ef6a50c58.slice/crio-00751651357fb275403e4a863dad38d13455c7c3b291b54ac458cc33db37860e WatchSource:0}: Error finding container 00751651357fb275403e4a863dad38d13455c7c3b291b54ac458cc33db37860e: Status 404 returned error can't find the container with id 00751651357fb275403e4a863dad38d13455c7c3b291b54ac458cc33db37860e Nov 22 10:57:02 crc kubenswrapper[4938]: I1122 10:57:02.960415 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-x62vc"] Nov 22 10:57:02 crc kubenswrapper[4938]: I1122 10:57:02.974805 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-lr6cz"] Nov 22 10:57:02 crc kubenswrapper[4938]: W1122 10:57:02.976774 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f269f03_228e_42e2_b49f_106fcb2bc258.slice/crio-cbe36c0d4d687174f53db8e66e01744cea6e3db971e580cc167dfb5cee6784a2 WatchSource:0}: Error finding container cbe36c0d4d687174f53db8e66e01744cea6e3db971e580cc167dfb5cee6784a2: Status 404 returned error can't find the container with id cbe36c0d4d687174f53db8e66e01744cea6e3db971e580cc167dfb5cee6784a2 Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.059466 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 22 10:57:03 crc kubenswrapper[4938]: W1122 10:57:03.063578 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a0f58e0_5202_4792_bd1a_64966c18450f.slice/crio-1ec9d6792e365e839a7bb2470b77dc4a750764f6f95437ff77575fe15efdff0f WatchSource:0}: Error finding container 1ec9d6792e365e839a7bb2470b77dc4a750764f6f95437ff77575fe15efdff0f: Status 404 returned error can't find the container with id 1ec9d6792e365e839a7bb2470b77dc4a750764f6f95437ff77575fe15efdff0f Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.310000 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wn476" event={"ID":"181febdf-4ec0-45f1-a062-f2f097504deb","Type":"ContainerStarted","Data":"e134f011d7098d127ca2fd87a15ac5d118f8e48dbc13b2250eb5777ebd84b04c"} Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.311084 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"1ec9d6792e365e839a7bb2470b77dc4a750764f6f95437ff77575fe15efdff0f"} Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.312266 4938 generic.go:334] "Generic (PLEG): container finished" podID="0f269f03-228e-42e2-b49f-106fcb2bc258" 
containerID="f83161d1b77395d21f5c7f6dcadbefa730542f6f0cfae26215b24faba3d96355" exitCode=0 Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.312302 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-lr6cz" event={"ID":"0f269f03-228e-42e2-b49f-106fcb2bc258","Type":"ContainerDied","Data":"f83161d1b77395d21f5c7f6dcadbefa730542f6f0cfae26215b24faba3d96355"} Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.312339 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-lr6cz" event={"ID":"0f269f03-228e-42e2-b49f-106fcb2bc258","Type":"ContainerStarted","Data":"cbe36c0d4d687174f53db8e66e01744cea6e3db971e580cc167dfb5cee6784a2"} Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.313730 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-x62vc" event={"ID":"87059052-68f8-4e9c-9701-51a7f618e383","Type":"ContainerStarted","Data":"f6c4410aad58c6efe2a647323c9c2713b8a5664bc05ee8b6ad1bcbce6f1a16cc"} Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.314790 4938 generic.go:334] "Generic (PLEG): container finished" podID="f645453d-fd00-45da-bc78-1f8bde75b6e3" containerID="9a08f6e69f11d128d3b7000b90674a6fd9c82efedbd37e69eaf32a169c94b907" exitCode=0 Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.314818 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-d64lt" event={"ID":"f645453d-fd00-45da-bc78-1f8bde75b6e3","Type":"ContainerDied","Data":"9a08f6e69f11d128d3b7000b90674a6fd9c82efedbd37e69eaf32a169c94b907"} Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.314847 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-d64lt" event={"ID":"f645453d-fd00-45da-bc78-1f8bde75b6e3","Type":"ContainerStarted","Data":"14004103e8e3804c2106c07d9e3fcc5c6a04dd91a14df734b1a795cf9f630594"} Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.315985 4938 generic.go:334] "Generic (PLEG): container finished" podID="577e27a5-3da6-4a00-9897-be6ef6a50c58" containerID="5c86082550534649d7823b2c65a8cc98e8cd46fbca02e5267f6b43cc9635dd1d" exitCode=0 Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.316011 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-5bmmm" event={"ID":"577e27a5-3da6-4a00-9897-be6ef6a50c58","Type":"ContainerDied","Data":"5c86082550534649d7823b2c65a8cc98e8cd46fbca02e5267f6b43cc9635dd1d"} Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.316028 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-5bmmm" event={"ID":"577e27a5-3da6-4a00-9897-be6ef6a50c58","Type":"ContainerStarted","Data":"00751651357fb275403e4a863dad38d13455c7c3b291b54ac458cc33db37860e"} Nov 22 10:57:03 crc kubenswrapper[4938]: I1122 10:57:03.337633 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-wn476" podStartSLOduration=2.613541363 podStartE2EDuration="15.337610428s" podCreationTimestamp="2025-11-22 10:56:48 +0000 UTC" firstStartedPulling="2025-11-22 10:56:49.684134539 +0000 UTC m=+1142.151971938" lastFinishedPulling="2025-11-22 10:57:02.408203594 +0000 UTC m=+1154.876041003" observedRunningTime="2025-11-22 10:57:03.324744523 +0000 UTC m=+1155.792581922" watchObservedRunningTime="2025-11-22 10:57:03.337610428 +0000 UTC m=+1155.805447837" Nov 22 10:57:04 crc kubenswrapper[4938]: I1122 10:57:04.326795 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"87dee84ec23a71dd1adea3e93a2c8813cecdf25524e518369427c7102b8c9fe1"} Nov 22 10:57:05 crc kubenswrapper[4938]: I1122 10:57:05.339757 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"15011870037da1197ca0a23bae1899c4eb4343e8867930d9842da22ab0fe2086"} Nov 22 10:57:05 crc kubenswrapper[4938]: I1122 10:57:05.340224 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"859bddd1d242b41737ee5f374231d59b3e95e8751d481c6a3fa444b55497c9ba"} Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.149103 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-lr6cz" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.155965 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-5bmmm" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.197536 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-d64lt" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.307523 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvdlp\" (UniqueName: \"kubernetes.io/projected/f645453d-fd00-45da-bc78-1f8bde75b6e3-kube-api-access-fvdlp\") pod \"f645453d-fd00-45da-bc78-1f8bde75b6e3\" (UID: \"f645453d-fd00-45da-bc78-1f8bde75b6e3\") " Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.307587 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbdrk\" (UniqueName: \"kubernetes.io/projected/577e27a5-3da6-4a00-9897-be6ef6a50c58-kube-api-access-bbdrk\") pod \"577e27a5-3da6-4a00-9897-be6ef6a50c58\" (UID: \"577e27a5-3da6-4a00-9897-be6ef6a50c58\") " Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.307663 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ht26k\" (UniqueName: \"kubernetes.io/projected/0f269f03-228e-42e2-b49f-106fcb2bc258-kube-api-access-ht26k\") pod \"0f269f03-228e-42e2-b49f-106fcb2bc258\" (UID: \"0f269f03-228e-42e2-b49f-106fcb2bc258\") " Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.315478 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/577e27a5-3da6-4a00-9897-be6ef6a50c58-kube-api-access-bbdrk" (OuterVolumeSpecName: "kube-api-access-bbdrk") pod "577e27a5-3da6-4a00-9897-be6ef6a50c58" (UID: "577e27a5-3da6-4a00-9897-be6ef6a50c58"). InnerVolumeSpecName "kube-api-access-bbdrk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.315748 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f269f03-228e-42e2-b49f-106fcb2bc258-kube-api-access-ht26k" (OuterVolumeSpecName: "kube-api-access-ht26k") pod "0f269f03-228e-42e2-b49f-106fcb2bc258" (UID: "0f269f03-228e-42e2-b49f-106fcb2bc258"). InnerVolumeSpecName "kube-api-access-ht26k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.316627 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f645453d-fd00-45da-bc78-1f8bde75b6e3-kube-api-access-fvdlp" (OuterVolumeSpecName: "kube-api-access-fvdlp") pod "f645453d-fd00-45da-bc78-1f8bde75b6e3" (UID: "f645453d-fd00-45da-bc78-1f8bde75b6e3"). InnerVolumeSpecName "kube-api-access-fvdlp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.360644 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-lr6cz" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.361039 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-lr6cz" event={"ID":"0f269f03-228e-42e2-b49f-106fcb2bc258","Type":"ContainerDied","Data":"cbe36c0d4d687174f53db8e66e01744cea6e3db971e580cc167dfb5cee6784a2"} Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.361100 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cbe36c0d4d687174f53db8e66e01744cea6e3db971e580cc167dfb5cee6784a2" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.362821 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-d64lt" event={"ID":"f645453d-fd00-45da-bc78-1f8bde75b6e3","Type":"ContainerDied","Data":"14004103e8e3804c2106c07d9e3fcc5c6a04dd91a14df734b1a795cf9f630594"} Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.362844 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14004103e8e3804c2106c07d9e3fcc5c6a04dd91a14df734b1a795cf9f630594" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.362841 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-d64lt" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.369544 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-5bmmm" event={"ID":"577e27a5-3da6-4a00-9897-be6ef6a50c58","Type":"ContainerDied","Data":"00751651357fb275403e4a863dad38d13455c7c3b291b54ac458cc33db37860e"} Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.369583 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00751651357fb275403e4a863dad38d13455c7c3b291b54ac458cc33db37860e" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.369732 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-5bmmm" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.409654 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbdrk\" (UniqueName: \"kubernetes.io/projected/577e27a5-3da6-4a00-9897-be6ef6a50c58-kube-api-access-bbdrk\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.409703 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ht26k\" (UniqueName: \"kubernetes.io/projected/0f269f03-228e-42e2-b49f-106fcb2bc258-kube-api-access-ht26k\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:07 crc kubenswrapper[4938]: I1122 10:57:07.409724 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvdlp\" (UniqueName: \"kubernetes.io/projected/f645453d-fd00-45da-bc78-1f8bde75b6e3-kube-api-access-fvdlp\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:08 crc kubenswrapper[4938]: I1122 10:57:08.378717 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"7a12126d41ddc8ac96701dd577f12a84656a6b777a143ef0276028a0fea5b459"} Nov 22 10:57:11 crc kubenswrapper[4938]: I1122 10:57:11.301155 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:57:11 crc kubenswrapper[4938]: I1122 10:57:11.301452 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:57:11 crc kubenswrapper[4938]: I1122 10:57:11.301494 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 10:57:11 crc kubenswrapper[4938]: I1122 10:57:11.302094 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"83d06198e1a05493993f82688b4c5e788920b5b335d6cb139a79e3c5688ff404"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 10:57:11 crc kubenswrapper[4938]: I1122 10:57:11.302143 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://83d06198e1a05493993f82688b4c5e788920b5b335d6cb139a79e3c5688ff404" gracePeriod=600 Nov 22 10:57:12 crc kubenswrapper[4938]: I1122 10:57:12.412588 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="83d06198e1a05493993f82688b4c5e788920b5b335d6cb139a79e3c5688ff404" exitCode=0 Nov 22 10:57:12 crc kubenswrapper[4938]: I1122 10:57:12.412635 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" 
event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"83d06198e1a05493993f82688b4c5e788920b5b335d6cb139a79e3c5688ff404"} Nov 22 10:57:12 crc kubenswrapper[4938]: I1122 10:57:12.413050 4938 scope.go:117] "RemoveContainer" containerID="9bf1ca56eb9b5ca54774b2ff22753d6d20a7c9a6e4ea3d50501b2ce9692054fe" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.499682 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-adde-account-create-8htj4"] Nov 22 10:57:15 crc kubenswrapper[4938]: E1122 10:57:15.500356 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="577e27a5-3da6-4a00-9897-be6ef6a50c58" containerName="mariadb-database-create" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.500372 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="577e27a5-3da6-4a00-9897-be6ef6a50c58" containerName="mariadb-database-create" Nov 22 10:57:15 crc kubenswrapper[4938]: E1122 10:57:15.500401 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f269f03-228e-42e2-b49f-106fcb2bc258" containerName="mariadb-database-create" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.500410 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f269f03-228e-42e2-b49f-106fcb2bc258" containerName="mariadb-database-create" Nov 22 10:57:15 crc kubenswrapper[4938]: E1122 10:57:15.500430 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f645453d-fd00-45da-bc78-1f8bde75b6e3" containerName="mariadb-database-create" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.500441 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f645453d-fd00-45da-bc78-1f8bde75b6e3" containerName="mariadb-database-create" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.500649 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="577e27a5-3da6-4a00-9897-be6ef6a50c58" containerName="mariadb-database-create" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.500707 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f269f03-228e-42e2-b49f-106fcb2bc258" containerName="mariadb-database-create" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.500736 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="f645453d-fd00-45da-bc78-1f8bde75b6e3" containerName="mariadb-database-create" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.501508 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-adde-account-create-8htj4" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.504403 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.525869 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-adde-account-create-8htj4"] Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.652495 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c44b4\" (UniqueName: \"kubernetes.io/projected/81258080-d7e9-4f68-b261-fb3e9c467fed-kube-api-access-c44b4\") pod \"barbican-adde-account-create-8htj4\" (UID: \"81258080-d7e9-4f68-b261-fb3e9c467fed\") " pod="openstack/barbican-adde-account-create-8htj4" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.674319 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-c5fa-account-create-qwnhv"] Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.675561 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c5fa-account-create-qwnhv" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.677464 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.684928 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c5fa-account-create-qwnhv"] Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.754743 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c44b4\" (UniqueName: \"kubernetes.io/projected/81258080-d7e9-4f68-b261-fb3e9c467fed-kube-api-access-c44b4\") pod \"barbican-adde-account-create-8htj4\" (UID: \"81258080-d7e9-4f68-b261-fb3e9c467fed\") " pod="openstack/barbican-adde-account-create-8htj4" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.755207 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nh2zf\" (UniqueName: \"kubernetes.io/projected/ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b-kube-api-access-nh2zf\") pod \"cinder-c5fa-account-create-qwnhv\" (UID: \"ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b\") " pod="openstack/cinder-c5fa-account-create-qwnhv" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.773970 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c44b4\" (UniqueName: \"kubernetes.io/projected/81258080-d7e9-4f68-b261-fb3e9c467fed-kube-api-access-c44b4\") pod \"barbican-adde-account-create-8htj4\" (UID: \"81258080-d7e9-4f68-b261-fb3e9c467fed\") " pod="openstack/barbican-adde-account-create-8htj4" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.840639 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-adde-account-create-8htj4" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.856355 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nh2zf\" (UniqueName: \"kubernetes.io/projected/ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b-kube-api-access-nh2zf\") pod \"cinder-c5fa-account-create-qwnhv\" (UID: \"ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b\") " pod="openstack/cinder-c5fa-account-create-qwnhv" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.873983 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7ebc-account-create-88cmf"] Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.876707 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7ebc-account-create-88cmf" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.878718 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nh2zf\" (UniqueName: \"kubernetes.io/projected/ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b-kube-api-access-nh2zf\") pod \"cinder-c5fa-account-create-qwnhv\" (UID: \"ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b\") " pod="openstack/cinder-c5fa-account-create-qwnhv" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.879022 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.890072 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7ebc-account-create-88cmf"] Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.958062 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4tds\" (UniqueName: \"kubernetes.io/projected/4ee48b43-df7f-4bb1-b037-8509d4ccee45-kube-api-access-n4tds\") pod \"neutron-7ebc-account-create-88cmf\" (UID: \"4ee48b43-df7f-4bb1-b037-8509d4ccee45\") " pod="openstack/neutron-7ebc-account-create-88cmf" Nov 22 10:57:15 crc kubenswrapper[4938]: I1122 10:57:15.998078 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c5fa-account-create-qwnhv" Nov 22 10:57:16 crc kubenswrapper[4938]: I1122 10:57:16.060233 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4tds\" (UniqueName: \"kubernetes.io/projected/4ee48b43-df7f-4bb1-b037-8509d4ccee45-kube-api-access-n4tds\") pod \"neutron-7ebc-account-create-88cmf\" (UID: \"4ee48b43-df7f-4bb1-b037-8509d4ccee45\") " pod="openstack/neutron-7ebc-account-create-88cmf" Nov 22 10:57:16 crc kubenswrapper[4938]: I1122 10:57:16.078706 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4tds\" (UniqueName: \"kubernetes.io/projected/4ee48b43-df7f-4bb1-b037-8509d4ccee45-kube-api-access-n4tds\") pod \"neutron-7ebc-account-create-88cmf\" (UID: \"4ee48b43-df7f-4bb1-b037-8509d4ccee45\") " pod="openstack/neutron-7ebc-account-create-88cmf" Nov 22 10:57:16 crc kubenswrapper[4938]: I1122 10:57:16.231290 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7ebc-account-create-88cmf" Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.157832 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7ebc-account-create-88cmf"] Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.167417 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c5fa-account-create-qwnhv"] Nov 22 10:57:19 crc kubenswrapper[4938]: W1122 10:57:19.170100 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ee48b43_df7f_4bb1_b037_8509d4ccee45.slice/crio-b19b8c32333a3d98d8d3427bf68c93ac1b9bcbcd14fdb258b87a6caef888b9bf WatchSource:0}: Error finding container b19b8c32333a3d98d8d3427bf68c93ac1b9bcbcd14fdb258b87a6caef888b9bf: Status 404 returned error can't find the container with id b19b8c32333a3d98d8d3427bf68c93ac1b9bcbcd14fdb258b87a6caef888b9bf Nov 22 10:57:19 crc kubenswrapper[4938]: W1122 10:57:19.178240 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac19c1e7_231e_4dd0_84ce_0e7cc54feb9b.slice/crio-819f85b3970f1aa2c5de14b6a01cf9063ceb9e06941475f74f232ee41fb7b1ad WatchSource:0}: Error finding container 819f85b3970f1aa2c5de14b6a01cf9063ceb9e06941475f74f232ee41fb7b1ad: Status 404 returned error can't find the container with id 819f85b3970f1aa2c5de14b6a01cf9063ceb9e06941475f74f232ee41fb7b1ad Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.293282 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-adde-account-create-8htj4"] Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.477105 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-adde-account-create-8htj4" event={"ID":"81258080-d7e9-4f68-b261-fb3e9c467fed","Type":"ContainerStarted","Data":"9a32bf269a3cf2fd9919d9b40b84a73917fb651bbfe7203cb4976b17bca8d677"} Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.483296 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"f32603d3cd38d3d94b04f506650486f2678a5b58ca5be3b20ab1308b521f5361"} Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.485576 4938 generic.go:334] "Generic (PLEG): container finished" podID="ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b" containerID="cd7b60d2938e2770cfe2feb91a0aba1b7d1697b025b4311e086ba77ea683bb6d" exitCode=0 Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.485635 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c5fa-account-create-qwnhv" event={"ID":"ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b","Type":"ContainerDied","Data":"cd7b60d2938e2770cfe2feb91a0aba1b7d1697b025b4311e086ba77ea683bb6d"} Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.485659 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c5fa-account-create-qwnhv" event={"ID":"ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b","Type":"ContainerStarted","Data":"819f85b3970f1aa2c5de14b6a01cf9063ceb9e06941475f74f232ee41fb7b1ad"} Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.491006 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"0cff540f8129f748bc112b4983b02756b78dfb789bb8e986e937cb06c96ebbad"} Nov 22 10:57:19 crc 
kubenswrapper[4938]: I1122 10:57:19.491041 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"2586b73319d4efeb3953a0cf79937c25d053082232cd1a44c4388923a3b41b21"} Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.492557 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-x62vc" event={"ID":"87059052-68f8-4e9c-9701-51a7f618e383","Type":"ContainerStarted","Data":"bd3649f8c128786e6ac24b4865a8ecd81218dd97156b80e803c1410212cebcac"} Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.503464 4938 generic.go:334] "Generic (PLEG): container finished" podID="4ee48b43-df7f-4bb1-b037-8509d4ccee45" containerID="ce9bff0db94af30448ba1164fa0fcaf09033b66dcfe20f667a6df374ecfce7bf" exitCode=0 Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.503520 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ebc-account-create-88cmf" event={"ID":"4ee48b43-df7f-4bb1-b037-8509d4ccee45","Type":"ContainerDied","Data":"ce9bff0db94af30448ba1164fa0fcaf09033b66dcfe20f667a6df374ecfce7bf"} Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.503605 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ebc-account-create-88cmf" event={"ID":"4ee48b43-df7f-4bb1-b037-8509d4ccee45","Type":"ContainerStarted","Data":"b19b8c32333a3d98d8d3427bf68c93ac1b9bcbcd14fdb258b87a6caef888b9bf"} Nov 22 10:57:19 crc kubenswrapper[4938]: I1122 10:57:19.518371 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-x62vc" podStartSLOduration=8.783301024 podStartE2EDuration="24.518350365s" podCreationTimestamp="2025-11-22 10:56:55 +0000 UTC" firstStartedPulling="2025-11-22 10:57:02.974804412 +0000 UTC m=+1155.442641811" lastFinishedPulling="2025-11-22 10:57:18.709853753 +0000 UTC m=+1171.177691152" observedRunningTime="2025-11-22 10:57:19.518253543 +0000 UTC m=+1171.986090942" watchObservedRunningTime="2025-11-22 10:57:19.518350365 +0000 UTC m=+1171.986187764" Nov 22 10:57:20 crc kubenswrapper[4938]: I1122 10:57:20.513721 4938 generic.go:334] "Generic (PLEG): container finished" podID="81258080-d7e9-4f68-b261-fb3e9c467fed" containerID="d216f5ec180a21957a61d54283f1c08881bbb217b37823302fee45b4f190e3cf" exitCode=0 Nov 22 10:57:20 crc kubenswrapper[4938]: I1122 10:57:20.513827 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-adde-account-create-8htj4" event={"ID":"81258080-d7e9-4f68-b261-fb3e9c467fed","Type":"ContainerDied","Data":"d216f5ec180a21957a61d54283f1c08881bbb217b37823302fee45b4f190e3cf"} Nov 22 10:57:20 crc kubenswrapper[4938]: I1122 10:57:20.524827 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"2cc2f2166b007111f8887ac6bcd8b8b39e94bb000ed1836f2ffe8913da2d41ee"} Nov 22 10:57:20 crc kubenswrapper[4938]: I1122 10:57:20.524870 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"a9e668b3862aafe4c9da6290e114eecdb2121aa52d0261138207198b4e690523"} Nov 22 10:57:20 crc kubenswrapper[4938]: I1122 10:57:20.847943 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7ebc-account-create-88cmf" Nov 22 10:57:20 crc kubenswrapper[4938]: I1122 10:57:20.925992 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c5fa-account-create-qwnhv" Nov 22 10:57:20 crc kubenswrapper[4938]: I1122 10:57:20.942888 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4tds\" (UniqueName: \"kubernetes.io/projected/4ee48b43-df7f-4bb1-b037-8509d4ccee45-kube-api-access-n4tds\") pod \"4ee48b43-df7f-4bb1-b037-8509d4ccee45\" (UID: \"4ee48b43-df7f-4bb1-b037-8509d4ccee45\") " Nov 22 10:57:20 crc kubenswrapper[4938]: I1122 10:57:20.948783 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ee48b43-df7f-4bb1-b037-8509d4ccee45-kube-api-access-n4tds" (OuterVolumeSpecName: "kube-api-access-n4tds") pod "4ee48b43-df7f-4bb1-b037-8509d4ccee45" (UID: "4ee48b43-df7f-4bb1-b037-8509d4ccee45"). InnerVolumeSpecName "kube-api-access-n4tds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:21 crc kubenswrapper[4938]: I1122 10:57:21.044454 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nh2zf\" (UniqueName: \"kubernetes.io/projected/ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b-kube-api-access-nh2zf\") pod \"ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b\" (UID: \"ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b\") " Nov 22 10:57:21 crc kubenswrapper[4938]: I1122 10:57:21.044933 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4tds\" (UniqueName: \"kubernetes.io/projected/4ee48b43-df7f-4bb1-b037-8509d4ccee45-kube-api-access-n4tds\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:21 crc kubenswrapper[4938]: I1122 10:57:21.048711 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b-kube-api-access-nh2zf" (OuterVolumeSpecName: "kube-api-access-nh2zf") pod "ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b" (UID: "ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b"). InnerVolumeSpecName "kube-api-access-nh2zf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:21 crc kubenswrapper[4938]: I1122 10:57:21.146862 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nh2zf\" (UniqueName: \"kubernetes.io/projected/ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b-kube-api-access-nh2zf\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:21 crc kubenswrapper[4938]: I1122 10:57:21.533535 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ebc-account-create-88cmf" event={"ID":"4ee48b43-df7f-4bb1-b037-8509d4ccee45","Type":"ContainerDied","Data":"b19b8c32333a3d98d8d3427bf68c93ac1b9bcbcd14fdb258b87a6caef888b9bf"} Nov 22 10:57:21 crc kubenswrapper[4938]: I1122 10:57:21.533856 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b19b8c32333a3d98d8d3427bf68c93ac1b9bcbcd14fdb258b87a6caef888b9bf" Nov 22 10:57:21 crc kubenswrapper[4938]: I1122 10:57:21.533559 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7ebc-account-create-88cmf" Nov 22 10:57:21 crc kubenswrapper[4938]: I1122 10:57:21.536336 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-c5fa-account-create-qwnhv" Nov 22 10:57:21 crc kubenswrapper[4938]: I1122 10:57:21.536882 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c5fa-account-create-qwnhv" event={"ID":"ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b","Type":"ContainerDied","Data":"819f85b3970f1aa2c5de14b6a01cf9063ceb9e06941475f74f232ee41fb7b1ad"} Nov 22 10:57:21 crc kubenswrapper[4938]: I1122 10:57:21.536904 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="819f85b3970f1aa2c5de14b6a01cf9063ceb9e06941475f74f232ee41fb7b1ad" Nov 22 10:57:22 crc kubenswrapper[4938]: I1122 10:57:21.882355 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-adde-account-create-8htj4" Nov 22 10:57:22 crc kubenswrapper[4938]: I1122 10:57:21.959969 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c44b4\" (UniqueName: \"kubernetes.io/projected/81258080-d7e9-4f68-b261-fb3e9c467fed-kube-api-access-c44b4\") pod \"81258080-d7e9-4f68-b261-fb3e9c467fed\" (UID: \"81258080-d7e9-4f68-b261-fb3e9c467fed\") " Nov 22 10:57:22 crc kubenswrapper[4938]: I1122 10:57:21.965634 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81258080-d7e9-4f68-b261-fb3e9c467fed-kube-api-access-c44b4" (OuterVolumeSpecName: "kube-api-access-c44b4") pod "81258080-d7e9-4f68-b261-fb3e9c467fed" (UID: "81258080-d7e9-4f68-b261-fb3e9c467fed"). InnerVolumeSpecName "kube-api-access-c44b4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:22 crc kubenswrapper[4938]: I1122 10:57:22.062321 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c44b4\" (UniqueName: \"kubernetes.io/projected/81258080-d7e9-4f68-b261-fb3e9c467fed-kube-api-access-c44b4\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:22 crc kubenswrapper[4938]: I1122 10:57:22.546143 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-adde-account-create-8htj4" event={"ID":"81258080-d7e9-4f68-b261-fb3e9c467fed","Type":"ContainerDied","Data":"9a32bf269a3cf2fd9919d9b40b84a73917fb651bbfe7203cb4976b17bca8d677"} Nov 22 10:57:22 crc kubenswrapper[4938]: I1122 10:57:22.546463 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a32bf269a3cf2fd9919d9b40b84a73917fb651bbfe7203cb4976b17bca8d677" Nov 22 10:57:22 crc kubenswrapper[4938]: I1122 10:57:22.546534 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-adde-account-create-8htj4" Nov 22 10:57:22 crc kubenswrapper[4938]: I1122 10:57:22.551371 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"0bf04ee3b26d0d3aa5d583fc1b6239a2e03e44cb704aaa75aa70b4552be83f9d"} Nov 22 10:57:23 crc kubenswrapper[4938]: I1122 10:57:23.565238 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"e535698e71e437617b89d5bcf1306492995ce15892c5f5eb8d6d78b3b80ca940"} Nov 22 10:57:23 crc kubenswrapper[4938]: I1122 10:57:23.565741 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"a7138f8345bcd50c0c3881c5d206eec0dafde78b42c723eb8bb32a20e721e616"} Nov 22 10:57:23 crc kubenswrapper[4938]: I1122 10:57:23.565756 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"2f1bd71ec5234dc60473f8d96242fbf975ad83f74648ec462a0b442ec21c8190"} Nov 22 10:57:23 crc kubenswrapper[4938]: I1122 10:57:23.565765 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"b45ffaf0749673696b231080f0775f5d664785db8c229fbe1bb28d3891952513"} Nov 22 10:57:23 crc kubenswrapper[4938]: I1122 10:57:23.565774 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"93ba2371372ce81fe5c8e91c6614fd45adc4c4c84a68583535f63e3855d209cb"} Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.579110 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7a0f58e0-5202-4792-bd1a-64966c18450f","Type":"ContainerStarted","Data":"a913851502b82959e2488741b9fb96eaf3b1637a45b37e2365b68b13717ff9d2"} Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.581224 4938 generic.go:334] "Generic (PLEG): container finished" podID="87059052-68f8-4e9c-9701-51a7f618e383" containerID="bd3649f8c128786e6ac24b4865a8ecd81218dd97156b80e803c1410212cebcac" exitCode=0 Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.581262 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-x62vc" event={"ID":"87059052-68f8-4e9c-9701-51a7f618e383","Type":"ContainerDied","Data":"bd3649f8c128786e6ac24b4865a8ecd81218dd97156b80e803c1410212cebcac"} Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.622015 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=45.431718935 podStartE2EDuration="1m4.621993345s" podCreationTimestamp="2025-11-22 10:56:20 +0000 UTC" firstStartedPulling="2025-11-22 10:57:03.066200361 +0000 UTC m=+1155.534037760" lastFinishedPulling="2025-11-22 10:57:22.256474771 +0000 UTC m=+1174.724312170" observedRunningTime="2025-11-22 10:57:24.612495417 +0000 UTC m=+1177.080332826" watchObservedRunningTime="2025-11-22 10:57:24.621993345 +0000 UTC m=+1177.089830744" Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.883661 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-hwg8q"] Nov 22 10:57:24 crc 
kubenswrapper[4938]: E1122 10:57:24.883997 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81258080-d7e9-4f68-b261-fb3e9c467fed" containerName="mariadb-account-create" Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.884012 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="81258080-d7e9-4f68-b261-fb3e9c467fed" containerName="mariadb-account-create" Nov 22 10:57:24 crc kubenswrapper[4938]: E1122 10:57:24.884023 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ee48b43-df7f-4bb1-b037-8509d4ccee45" containerName="mariadb-account-create" Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.884029 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ee48b43-df7f-4bb1-b037-8509d4ccee45" containerName="mariadb-account-create" Nov 22 10:57:24 crc kubenswrapper[4938]: E1122 10:57:24.884049 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b" containerName="mariadb-account-create" Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.884055 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b" containerName="mariadb-account-create" Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.884198 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ee48b43-df7f-4bb1-b037-8509d4ccee45" containerName="mariadb-account-create" Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.884218 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b" containerName="mariadb-account-create" Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.884234 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="81258080-d7e9-4f68-b261-fb3e9c467fed" containerName="mariadb-account-create" Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.885366 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.888563 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 22 10:57:24 crc kubenswrapper[4938]: I1122 10:57:24.895296 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-hwg8q"] Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.019304 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-config\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.019379 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.019425 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.019736 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.020019 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gfzv\" (UniqueName: \"kubernetes.io/projected/b0bf43ab-ac15-4d14-805b-261acabed1f6-kube-api-access-4gfzv\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.020225 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.123132 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.123233 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: 
\"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.123315 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gfzv\" (UniqueName: \"kubernetes.io/projected/b0bf43ab-ac15-4d14-805b-261acabed1f6-kube-api-access-4gfzv\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.123410 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.124367 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.124441 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.124520 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.124613 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-config\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.124683 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.124916 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-config\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.125776 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc 
kubenswrapper[4938]: I1122 10:57:25.142310 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gfzv\" (UniqueName: \"kubernetes.io/projected/b0bf43ab-ac15-4d14-805b-261acabed1f6-kube-api-access-4gfzv\") pod \"dnsmasq-dns-5c79d794d7-hwg8q\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.208592 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.599145 4938 generic.go:334] "Generic (PLEG): container finished" podID="181febdf-4ec0-45f1-a062-f2f097504deb" containerID="e134f011d7098d127ca2fd87a15ac5d118f8e48dbc13b2250eb5777ebd84b04c" exitCode=0 Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.599982 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wn476" event={"ID":"181febdf-4ec0-45f1-a062-f2f097504deb","Type":"ContainerDied","Data":"e134f011d7098d127ca2fd87a15ac5d118f8e48dbc13b2250eb5777ebd84b04c"} Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.657605 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-hwg8q"] Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.858370 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-x62vc" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.943343 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqmp2\" (UniqueName: \"kubernetes.io/projected/87059052-68f8-4e9c-9701-51a7f618e383-kube-api-access-lqmp2\") pod \"87059052-68f8-4e9c-9701-51a7f618e383\" (UID: \"87059052-68f8-4e9c-9701-51a7f618e383\") " Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.943477 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87059052-68f8-4e9c-9701-51a7f618e383-combined-ca-bundle\") pod \"87059052-68f8-4e9c-9701-51a7f618e383\" (UID: \"87059052-68f8-4e9c-9701-51a7f618e383\") " Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.943544 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87059052-68f8-4e9c-9701-51a7f618e383-config-data\") pod \"87059052-68f8-4e9c-9701-51a7f618e383\" (UID: \"87059052-68f8-4e9c-9701-51a7f618e383\") " Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.947256 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87059052-68f8-4e9c-9701-51a7f618e383-kube-api-access-lqmp2" (OuterVolumeSpecName: "kube-api-access-lqmp2") pod "87059052-68f8-4e9c-9701-51a7f618e383" (UID: "87059052-68f8-4e9c-9701-51a7f618e383"). InnerVolumeSpecName "kube-api-access-lqmp2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.974056 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87059052-68f8-4e9c-9701-51a7f618e383-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "87059052-68f8-4e9c-9701-51a7f618e383" (UID: "87059052-68f8-4e9c-9701-51a7f618e383"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:25 crc kubenswrapper[4938]: I1122 10:57:25.984116 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87059052-68f8-4e9c-9701-51a7f618e383-config-data" (OuterVolumeSpecName: "config-data") pod "87059052-68f8-4e9c-9701-51a7f618e383" (UID: "87059052-68f8-4e9c-9701-51a7f618e383"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.046187 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqmp2\" (UniqueName: \"kubernetes.io/projected/87059052-68f8-4e9c-9701-51a7f618e383-kube-api-access-lqmp2\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.046215 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87059052-68f8-4e9c-9701-51a7f618e383-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.046225 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87059052-68f8-4e9c-9701-51a7f618e383-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.608867 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-x62vc" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.608867 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-x62vc" event={"ID":"87059052-68f8-4e9c-9701-51a7f618e383","Type":"ContainerDied","Data":"f6c4410aad58c6efe2a647323c9c2713b8a5664bc05ee8b6ad1bcbce6f1a16cc"} Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.608980 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6c4410aad58c6efe2a647323c9c2713b8a5664bc05ee8b6ad1bcbce6f1a16cc" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.611769 4938 generic.go:334] "Generic (PLEG): container finished" podID="b0bf43ab-ac15-4d14-805b-261acabed1f6" containerID="adf100510518f8bec518c64b344c2a046620f5edcc020e1c8ca3cb4e07ee0de7" exitCode=0 Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.611829 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" event={"ID":"b0bf43ab-ac15-4d14-805b-261acabed1f6","Type":"ContainerDied","Data":"adf100510518f8bec518c64b344c2a046620f5edcc020e1c8ca3cb4e07ee0de7"} Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.612379 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" event={"ID":"b0bf43ab-ac15-4d14-805b-261acabed1f6","Type":"ContainerStarted","Data":"abfacab3bbf5bbfaf2b43bca6312ddf09c6a0ec033e091b6d0174b50a14419e3"} Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.894808 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-hwg8q"] Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.906928 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-dwqmw"] Nov 22 10:57:26 crc kubenswrapper[4938]: E1122 10:57:26.907299 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87059052-68f8-4e9c-9701-51a7f618e383" containerName="keystone-db-sync" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.907316 4938 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="87059052-68f8-4e9c-9701-51a7f618e383" containerName="keystone-db-sync" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.907556 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="87059052-68f8-4e9c-9701-51a7f618e383" containerName="keystone-db-sync" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.908509 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.912861 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.913128 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-ct79k" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.913295 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.919530 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.927240 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dwqmw"] Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.953868 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b868669f-bdsk2"] Nov 22 10:57:26 crc kubenswrapper[4938]: I1122 10:57:26.955659 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.026546 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-bdsk2"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.064349 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-dns-svc\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.064415 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.064486 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-config\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.064512 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trmhj\" (UniqueName: \"kubernetes.io/projected/8619f69b-8834-462f-acad-d6d566273988-kube-api-access-trmhj\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.064536 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-q4xxm\" (UniqueName: \"kubernetes.io/projected/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-kube-api-access-q4xxm\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.064584 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.064608 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-credential-keys\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.064657 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-fernet-keys\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.064682 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-scripts\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.064722 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-config-data\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.064743 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.064763 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-combined-ca-bundle\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.166472 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-scripts\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.166559 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-config-data\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.166590 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-combined-ca-bundle\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.166614 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.166676 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-dns-svc\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.166709 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.166794 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-config\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.166825 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trmhj\" (UniqueName: \"kubernetes.io/projected/8619f69b-8834-462f-acad-d6d566273988-kube-api-access-trmhj\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.166849 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4xxm\" (UniqueName: \"kubernetes.io/projected/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-kube-api-access-q4xxm\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.166901 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.166927 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-credential-keys\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.167021 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-fernet-keys\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.168026 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.169574 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-config\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.170006 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.170771 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.171403 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-dns-svc\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.175133 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-credential-keys\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.179221 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-combined-ca-bundle\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.183701 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-config-data\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " 
pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.199355 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-scripts\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.203547 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-fernet-keys\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.235373 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6df769749-fdrcj"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.237228 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.254662 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.254889 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.261614 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trmhj\" (UniqueName: \"kubernetes.io/projected/8619f69b-8834-462f-acad-d6d566273988-kube-api-access-trmhj\") pod \"dnsmasq-dns-5b868669f-bdsk2\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.274406 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4xxm\" (UniqueName: \"kubernetes.io/projected/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-kube-api-access-q4xxm\") pod \"keystone-bootstrap-dwqmw\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.274484 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-ddndr"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.275956 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.282417 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.284327 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-2kfh9" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.284487 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.288122 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6df769749-fdrcj"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.297088 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.297336 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-s7p24" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.297510 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.317972 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-ddndr"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.367215 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-qd8nf"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.368323 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.374376 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-mvrv7" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.375561 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-scripts\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.375624 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-config-data\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.375653 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-combined-ca-bundle\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.375719 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29p8b\" (UniqueName: \"kubernetes.io/projected/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-kube-api-access-29p8b\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.375749 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69qsr\" (UniqueName: 
\"kubernetes.io/projected/745ffa75-881b-4c0d-8f61-70d872617409-kube-api-access-69qsr\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.375771 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-logs\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.375809 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-db-sync-config-data\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.375871 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-horizon-secret-key\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.375894 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/745ffa75-881b-4c0d-8f61-70d872617409-etc-machine-id\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.375924 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-scripts\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.375968 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-config-data\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.383310 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.395345 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.397452 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-bdsk2"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.469626 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wn476" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.477997 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29p8b\" (UniqueName: \"kubernetes.io/projected/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-kube-api-access-29p8b\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478038 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-logs\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478057 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69qsr\" (UniqueName: \"kubernetes.io/projected/745ffa75-881b-4c0d-8f61-70d872617409-kube-api-access-69qsr\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478085 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-combined-ca-bundle\") pod \"neutron-db-sync-qd8nf\" (UID: \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\") " pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478113 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-db-sync-config-data\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478162 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-horizon-secret-key\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478177 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/745ffa75-881b-4c0d-8f61-70d872617409-etc-machine-id\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478194 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-scripts\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478217 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-config-data\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478249 4938 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb8tg\" (UniqueName: \"kubernetes.io/projected/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-kube-api-access-zb8tg\") pod \"neutron-db-sync-qd8nf\" (UID: \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\") " pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478266 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-scripts\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478297 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-config-data\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478315 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-combined-ca-bundle\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.478333 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-config\") pod \"neutron-db-sync-qd8nf\" (UID: \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\") " pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.479726 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-qd8nf"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.480019 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/745ffa75-881b-4c0d-8f61-70d872617409-etc-machine-id\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.480306 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-logs\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.481490 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-config-data\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.482018 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-scripts\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.491032 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-horizon-secret-key\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.494669 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-db-sync-config-data\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.495399 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-scripts\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.495473 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-config-data\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.500123 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-combined-ca-bundle\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.541361 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-kxspj"] Nov 22 10:57:27 crc kubenswrapper[4938]: E1122 10:57:27.541821 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="181febdf-4ec0-45f1-a062-f2f097504deb" containerName="glance-db-sync" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.541841 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="181febdf-4ec0-45f1-a062-f2f097504deb" containerName="glance-db-sync" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.542049 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="181febdf-4ec0-45f1-a062-f2f097504deb" containerName="glance-db-sync" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.543012 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.544599 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.549595 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69qsr\" (UniqueName: \"kubernetes.io/projected/745ffa75-881b-4c0d-8f61-70d872617409-kube-api-access-69qsr\") pod \"cinder-db-sync-ddndr\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.550198 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29p8b\" (UniqueName: \"kubernetes.io/projected/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-kube-api-access-29p8b\") pod \"horizon-6df769749-fdrcj\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.562038 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-vs774"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.615434 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.619388 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.619945 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.619997 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-2bhmb" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.635369 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-p9nkx"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.671100 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-combined-ca-bundle\") pod \"181febdf-4ec0-45f1-a062-f2f097504deb\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.671224 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7r2c7\" (UniqueName: \"kubernetes.io/projected/181febdf-4ec0-45f1-a062-f2f097504deb-kube-api-access-7r2c7\") pod \"181febdf-4ec0-45f1-a062-f2f097504deb\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.671381 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-config-data\") pod \"181febdf-4ec0-45f1-a062-f2f097504deb\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.671420 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-db-sync-config-data\") pod \"181febdf-4ec0-45f1-a062-f2f097504deb\" (UID: \"181febdf-4ec0-45f1-a062-f2f097504deb\") " Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.671659 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-config\") pod 
\"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.671793 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.671829 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-combined-ca-bundle\") pod \"neutron-db-sync-qd8nf\" (UID: \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\") " pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.671995 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.672127 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-dns-svc\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.672158 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5w4v6\" (UniqueName: \"kubernetes.io/projected/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-kube-api-access-5w4v6\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.672179 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb8tg\" (UniqueName: \"kubernetes.io/projected/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-kube-api-access-zb8tg\") pod \"neutron-db-sync-qd8nf\" (UID: \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\") " pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.672219 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.672285 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-config\") pod \"neutron-db-sync-qd8nf\" (UID: \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\") " pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.685158 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-config\") pod \"neutron-db-sync-qd8nf\" 
(UID: \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\") " pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.687009 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.690570 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.690815 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-gsv52" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.710802 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-combined-ca-bundle\") pod \"neutron-db-sync-qd8nf\" (UID: \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\") " pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.718032 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "181febdf-4ec0-45f1-a062-f2f097504deb" (UID: "181febdf-4ec0-45f1-a062-f2f097504deb"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.719260 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/181febdf-4ec0-45f1-a062-f2f097504deb-kube-api-access-7r2c7" (OuterVolumeSpecName: "kube-api-access-7r2c7") pod "181febdf-4ec0-45f1-a062-f2f097504deb" (UID: "181febdf-4ec0-45f1-a062-f2f097504deb"). InnerVolumeSpecName "kube-api-access-7r2c7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.724258 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" event={"ID":"b0bf43ab-ac15-4d14-805b-261acabed1f6","Type":"ContainerStarted","Data":"99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb"} Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.727176 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" podUID="b0bf43ab-ac15-4d14-805b-261acabed1f6" containerName="dnsmasq-dns" containerID="cri-o://99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb" gracePeriod=10 Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.727340 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.737611 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb8tg\" (UniqueName: \"kubernetes.io/projected/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-kube-api-access-zb8tg\") pod \"neutron-db-sync-qd8nf\" (UID: \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\") " pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.744940 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-kxspj"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.754008 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wn476" event={"ID":"181febdf-4ec0-45f1-a062-f2f097504deb","Type":"ContainerDied","Data":"54e4e1ee0dece242144f628b04d704e57c9768a133a84b6d244725cd09764ed7"} Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.754056 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54e4e1ee0dece242144f628b04d704e57c9768a133a84b6d244725cd09764ed7" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.754138 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-wn476" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.766635 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "181febdf-4ec0-45f1-a062-f2f097504deb" (UID: "181febdf-4ec0-45f1-a062-f2f097504deb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.774594 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-scripts\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.774677 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-config\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.774790 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.774845 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-config-data\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.774913 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.774994 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/973da40a-c63d-4e06-8750-c3d31d8b0abb-logs\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.775043 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-combined-ca-bundle\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.775066 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-dns-svc\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.775099 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5w4v6\" (UniqueName: \"kubernetes.io/projected/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-kube-api-access-5w4v6\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc 
kubenswrapper[4938]: I1122 10:57:27.776515 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvkdk\" (UniqueName: \"kubernetes.io/projected/973da40a-c63d-4e06-8750-c3d31d8b0abb-kube-api-access-zvkdk\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.776580 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.776658 4938 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.776672 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.776684 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7r2c7\" (UniqueName: \"kubernetes.io/projected/181febdf-4ec0-45f1-a062-f2f097504deb-kube-api-access-7r2c7\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.777751 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.778585 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-dns-svc\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.778633 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-config\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.778857 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.780226 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-config-data" (OuterVolumeSpecName: "config-data") pod "181febdf-4ec0-45f1-a062-f2f097504deb" (UID: "181febdf-4ec0-45f1-a062-f2f097504deb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.788897 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vs774"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.789903 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.796492 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.804402 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-ddndr" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.809818 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5w4v6\" (UniqueName: \"kubernetes.io/projected/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-kube-api-access-5w4v6\") pod \"dnsmasq-dns-cf78879c9-kxspj\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.827476 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-68d465898c-vtz2k"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.829294 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.836197 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.852546 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-p9nkx"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.879014 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/973da40a-c63d-4e06-8750-c3d31d8b0abb-logs\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.879085 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-combined-ca-bundle\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.879125 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvkdk\" (UniqueName: \"kubernetes.io/projected/973da40a-c63d-4e06-8750-c3d31d8b0abb-kube-api-access-zvkdk\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.879159 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c795cb12-352a-40bf-b828-4c4d16472eea-combined-ca-bundle\") pod \"barbican-db-sync-p9nkx\" (UID: \"c795cb12-352a-40bf-b828-4c4d16472eea\") " pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.879189 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-scripts\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.879226 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvmgz\" (UniqueName: \"kubernetes.io/projected/c795cb12-352a-40bf-b828-4c4d16472eea-kube-api-access-gvmgz\") pod \"barbican-db-sync-p9nkx\" (UID: \"c795cb12-352a-40bf-b828-4c4d16472eea\") " pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.879292 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-config-data\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.879323 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c795cb12-352a-40bf-b828-4c4d16472eea-db-sync-config-data\") pod \"barbican-db-sync-p9nkx\" (UID: \"c795cb12-352a-40bf-b828-4c4d16472eea\") " pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.879386 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/181febdf-4ec0-45f1-a062-f2f097504deb-config-data\") on node \"crc\" 
DevicePath \"\"" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.879972 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/973da40a-c63d-4e06-8750-c3d31d8b0abb-logs\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.883181 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68d465898c-vtz2k"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.891301 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-config-data\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.894778 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-scripts\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.896116 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-combined-ca-bundle\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.910915 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvkdk\" (UniqueName: \"kubernetes.io/projected/973da40a-c63d-4e06-8750-c3d31d8b0abb-kube-api-access-zvkdk\") pod \"placement-db-sync-vs774\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " pod="openstack/placement-db-sync-vs774" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.920564 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.931045 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.937409 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.942081 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.942310 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.972509 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.983373 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c795cb12-352a-40bf-b828-4c4d16472eea-combined-ca-bundle\") pod \"barbican-db-sync-p9nkx\" (UID: \"c795cb12-352a-40bf-b828-4c4d16472eea\") " pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.983694 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f42b1674-4892-4907-a0e8-5aab697ff7aa-logs\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.983812 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvmgz\" (UniqueName: \"kubernetes.io/projected/c795cb12-352a-40bf-b828-4c4d16472eea-kube-api-access-gvmgz\") pod \"barbican-db-sync-p9nkx\" (UID: \"c795cb12-352a-40bf-b828-4c4d16472eea\") " pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.986837 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k65gk\" (UniqueName: \"kubernetes.io/projected/f42b1674-4892-4907-a0e8-5aab697ff7aa-kube-api-access-k65gk\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.990085 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c795cb12-352a-40bf-b828-4c4d16472eea-combined-ca-bundle\") pod \"barbican-db-sync-p9nkx\" (UID: \"c795cb12-352a-40bf-b828-4c4d16472eea\") " pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.994745 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c795cb12-352a-40bf-b828-4c4d16472eea-db-sync-config-data\") pod \"barbican-db-sync-p9nkx\" (UID: \"c795cb12-352a-40bf-b828-4c4d16472eea\") " pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.994815 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f42b1674-4892-4907-a0e8-5aab697ff7aa-horizon-secret-key\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.994923 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f42b1674-4892-4907-a0e8-5aab697ff7aa-config-data\") pod 
\"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:27 crc kubenswrapper[4938]: I1122 10:57:27.994983 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f42b1674-4892-4907-a0e8-5aab697ff7aa-scripts\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.003038 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" podStartSLOduration=4.003013345 podStartE2EDuration="4.003013345s" podCreationTimestamp="2025-11-22 10:57:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:27.754696465 +0000 UTC m=+1180.222533874" watchObservedRunningTime="2025-11-22 10:57:28.003013345 +0000 UTC m=+1180.470850734" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.005506 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c795cb12-352a-40bf-b828-4c4d16472eea-db-sync-config-data\") pod \"barbican-db-sync-p9nkx\" (UID: \"c795cb12-352a-40bf-b828-4c4d16472eea\") " pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.021591 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vs774" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.041042 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvmgz\" (UniqueName: \"kubernetes.io/projected/c795cb12-352a-40bf-b828-4c4d16472eea-kube-api-access-gvmgz\") pod \"barbican-db-sync-p9nkx\" (UID: \"c795cb12-352a-40bf-b828-4c4d16472eea\") " pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.048559 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-kxspj"] Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.054576 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-mf78l"] Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.064424 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.075599 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-mf78l"] Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096568 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k65gk\" (UniqueName: \"kubernetes.io/projected/f42b1674-4892-4907-a0e8-5aab697ff7aa-kube-api-access-k65gk\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096633 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-log-httpd\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096658 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f42b1674-4892-4907-a0e8-5aab697ff7aa-horizon-secret-key\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096681 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-scripts\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096697 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-config-data\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096718 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzf9c\" (UniqueName: \"kubernetes.io/projected/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-kube-api-access-gzf9c\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096735 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f42b1674-4892-4907-a0e8-5aab697ff7aa-config-data\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096753 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f42b1674-4892-4907-a0e8-5aab697ff7aa-scripts\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096783 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-config\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: 
\"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096799 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096820 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096839 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096854 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096879 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096894 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q82jp\" (UniqueName: \"kubernetes.io/projected/a071ebb7-d74e-4b58-b01d-e20eaf91150e-kube-api-access-q82jp\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096907 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-run-httpd\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096927 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f42b1674-4892-4907-a0e8-5aab697ff7aa-logs\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.096972 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: 
\"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.102202 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f42b1674-4892-4907-a0e8-5aab697ff7aa-scripts\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.102533 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f42b1674-4892-4907-a0e8-5aab697ff7aa-logs\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.108238 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f42b1674-4892-4907-a0e8-5aab697ff7aa-horizon-secret-key\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.108767 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f42b1674-4892-4907-a0e8-5aab697ff7aa-config-data\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.148762 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k65gk\" (UniqueName: \"kubernetes.io/projected/f42b1674-4892-4907-a0e8-5aab697ff7aa-kube-api-access-k65gk\") pod \"horizon-68d465898c-vtz2k\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.180344 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.197919 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q82jp\" (UniqueName: \"kubernetes.io/projected/a071ebb7-d74e-4b58-b01d-e20eaf91150e-kube-api-access-q82jp\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.198245 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.198264 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-run-httpd\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.198285 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.198375 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-log-httpd\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.198404 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-scripts\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.198423 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-config-data\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.198448 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzf9c\" (UniqueName: \"kubernetes.io/projected/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-kube-api-access-gzf9c\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.198489 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-config\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.198506 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.198531 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.198550 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.198568 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.202293 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.224321 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.225307 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-run-httpd\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.226293 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-log-httpd\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.227052 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.228623 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-config\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc 
kubenswrapper[4938]: I1122 10:57:28.233797 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.285480 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.305013 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q82jp\" (UniqueName: \"kubernetes.io/projected/a071ebb7-d74e-4b58-b01d-e20eaf91150e-kube-api-access-q82jp\") pod \"dnsmasq-dns-56df8fb6b7-mf78l\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.308836 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.311508 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-config-data\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.312383 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-scripts\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.324524 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.325158 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzf9c\" (UniqueName: \"kubernetes.io/projected/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-kube-api-access-gzf9c\") pod \"ceilometer-0\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.470492 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.524334 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-bdsk2"] Nov 22 10:57:28 crc kubenswrapper[4938]: W1122 10:57:28.571917 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8619f69b_8834_462f_acad_d6d566273988.slice/crio-5ad811d9978336676d6c963c5d13b21d3d5feddce65ace54e24ec3167eb4443e WatchSource:0}: Error finding container 5ad811d9978336676d6c963c5d13b21d3d5feddce65ace54e24ec3167eb4443e: Status 404 returned error can't find the container with id 5ad811d9978336676d6c963c5d13b21d3d5feddce65ace54e24ec3167eb4443e Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.595092 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.676746 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dwqmw"] Nov 22 10:57:28 crc kubenswrapper[4938]: W1122 10:57:28.694745 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d9c56e4_5398_4e98_b6d7_564bb36e8e95.slice/crio-88cf208d6160243e4fa50ae6a5cb658d3dbe0798a3eac7566fdb3db249587191 WatchSource:0}: Error finding container 88cf208d6160243e4fa50ae6a5cb658d3dbe0798a3eac7566fdb3db249587191: Status 404 returned error can't find the container with id 88cf208d6160243e4fa50ae6a5cb658d3dbe0798a3eac7566fdb3db249587191 Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.738504 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.780234 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dwqmw" event={"ID":"6d9c56e4-5398-4e98-b6d7-564bb36e8e95","Type":"ContainerStarted","Data":"88cf208d6160243e4fa50ae6a5cb658d3dbe0798a3eac7566fdb3db249587191"} Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.789171 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-bdsk2" event={"ID":"8619f69b-8834-462f-acad-d6d566273988","Type":"ContainerStarted","Data":"5ad811d9978336676d6c963c5d13b21d3d5feddce65ace54e24ec3167eb4443e"} Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.792804 4938 generic.go:334] "Generic (PLEG): container finished" podID="b0bf43ab-ac15-4d14-805b-261acabed1f6" containerID="99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb" exitCode=0 Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.792848 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" event={"ID":"b0bf43ab-ac15-4d14-805b-261acabed1f6","Type":"ContainerDied","Data":"99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb"} Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.792874 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" event={"ID":"b0bf43ab-ac15-4d14-805b-261acabed1f6","Type":"ContainerDied","Data":"abfacab3bbf5bbfaf2b43bca6312ddf09c6a0ec033e091b6d0174b50a14419e3"} Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.792889 4938 scope.go:117] "RemoveContainer" containerID="99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb" Nov 22 10:57:28 crc 
kubenswrapper[4938]: I1122 10:57:28.793029 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-hwg8q" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.834113 4938 scope.go:117] "RemoveContainer" containerID="adf100510518f8bec518c64b344c2a046620f5edcc020e1c8ca3cb4e07ee0de7" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.891383 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:28 crc kubenswrapper[4938]: E1122 10:57:28.891726 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0bf43ab-ac15-4d14-805b-261acabed1f6" containerName="init" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.891743 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0bf43ab-ac15-4d14-805b-261acabed1f6" containerName="init" Nov 22 10:57:28 crc kubenswrapper[4938]: E1122 10:57:28.891781 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0bf43ab-ac15-4d14-805b-261acabed1f6" containerName="dnsmasq-dns" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.891788 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0bf43ab-ac15-4d14-805b-261acabed1f6" containerName="dnsmasq-dns" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.891968 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0bf43ab-ac15-4d14-805b-261acabed1f6" containerName="dnsmasq-dns" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.892777 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.894112 4938 scope.go:117] "RemoveContainer" containerID="99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.894776 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-c5zth" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.895103 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.895838 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 22 10:57:28 crc kubenswrapper[4938]: E1122 10:57:28.896211 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb\": container with ID starting with 99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb not found: ID does not exist" containerID="99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.896246 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb"} err="failed to get container status \"99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb\": rpc error: code = NotFound desc = could not find container \"99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb\": container with ID starting with 99b56301e4041275c40738389d5e3f3696e89ec67b480702bb74104574796eeb not found: ID does not exist" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.896274 4938 scope.go:117] "RemoveContainer" 
containerID="adf100510518f8bec518c64b344c2a046620f5edcc020e1c8ca3cb4e07ee0de7" Nov 22 10:57:28 crc kubenswrapper[4938]: E1122 10:57:28.899317 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"adf100510518f8bec518c64b344c2a046620f5edcc020e1c8ca3cb4e07ee0de7\": container with ID starting with adf100510518f8bec518c64b344c2a046620f5edcc020e1c8ca3cb4e07ee0de7 not found: ID does not exist" containerID="adf100510518f8bec518c64b344c2a046620f5edcc020e1c8ca3cb4e07ee0de7" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.899357 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"adf100510518f8bec518c64b344c2a046620f5edcc020e1c8ca3cb4e07ee0de7"} err="failed to get container status \"adf100510518f8bec518c64b344c2a046620f5edcc020e1c8ca3cb4e07ee0de7\": rpc error: code = NotFound desc = could not find container \"adf100510518f8bec518c64b344c2a046620f5edcc020e1c8ca3cb4e07ee0de7\": container with ID starting with adf100510518f8bec518c64b344c2a046620f5edcc020e1c8ca3cb4e07ee0de7 not found: ID does not exist" Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.916001 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-ovsdbserver-sb\") pod \"b0bf43ab-ac15-4d14-805b-261acabed1f6\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.916097 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-ovsdbserver-nb\") pod \"b0bf43ab-ac15-4d14-805b-261acabed1f6\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.916153 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-config\") pod \"b0bf43ab-ac15-4d14-805b-261acabed1f6\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.916177 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-dns-svc\") pod \"b0bf43ab-ac15-4d14-805b-261acabed1f6\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.916219 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4gfzv\" (UniqueName: \"kubernetes.io/projected/b0bf43ab-ac15-4d14-805b-261acabed1f6-kube-api-access-4gfzv\") pod \"b0bf43ab-ac15-4d14-805b-261acabed1f6\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " Nov 22 10:57:28 crc kubenswrapper[4938]: I1122 10:57:28.916326 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-dns-swift-storage-0\") pod \"b0bf43ab-ac15-4d14-805b-261acabed1f6\" (UID: \"b0bf43ab-ac15-4d14-805b-261acabed1f6\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:28.940618 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0bf43ab-ac15-4d14-805b-261acabed1f6-kube-api-access-4gfzv" (OuterVolumeSpecName: "kube-api-access-4gfzv") pod 
"b0bf43ab-ac15-4d14-805b-261acabed1f6" (UID: "b0bf43ab-ac15-4d14-805b-261acabed1f6"). InnerVolumeSpecName "kube-api-access-4gfzv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:28.973419 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-qd8nf"] Nov 22 10:57:30 crc kubenswrapper[4938]: W1122 10:57:29.013756 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0b7fdd3_fa8e_4ebe_a8e9_424409b5b336.slice/crio-5e84722049a612e978d7035614c6b7594701e21aac0bc64a2326b6dc5d64496f WatchSource:0}: Error finding container 5e84722049a612e978d7035614c6b7594701e21aac0bc64a2326b6dc5d64496f: Status 404 returned error can't find the container with id 5e84722049a612e978d7035614c6b7594701e21aac0bc64a2326b6dc5d64496f Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.016394 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-config" (OuterVolumeSpecName: "config") pod "b0bf43ab-ac15-4d14-805b-261acabed1f6" (UID: "b0bf43ab-ac15-4d14-805b-261acabed1f6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.022440 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mczdw\" (UniqueName: \"kubernetes.io/projected/e8e05aaf-26e9-4787-aec7-8fc6de68684d-kube-api-access-mczdw\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.023538 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8e05aaf-26e9-4787-aec7-8fc6de68684d-logs\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.023623 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e8e05aaf-26e9-4787-aec7-8fc6de68684d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.023834 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-scripts\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.023887 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.023971 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-config-data\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.024040 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.026562 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.026597 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4gfzv\" (UniqueName: \"kubernetes.io/projected/b0bf43ab-ac15-4d14-805b-261acabed1f6-kube-api-access-4gfzv\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.028430 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6df769749-fdrcj"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.035525 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b0bf43ab-ac15-4d14-805b-261acabed1f6" (UID: "b0bf43ab-ac15-4d14-805b-261acabed1f6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.049295 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b0bf43ab-ac15-4d14-805b-261acabed1f6" (UID: "b0bf43ab-ac15-4d14-805b-261acabed1f6"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.061512 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b0bf43ab-ac15-4d14-805b-261acabed1f6" (UID: "b0bf43ab-ac15-4d14-805b-261acabed1f6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.076076 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.078943 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b0bf43ab-ac15-4d14-805b-261acabed1f6" (UID: "b0bf43ab-ac15-4d14-805b-261acabed1f6"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.087156 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-ddndr"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.108780 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-kxspj"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.129735 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vs774"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.130338 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-scripts\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.130374 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.130409 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-config-data\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.130446 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.130768 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.133861 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mczdw\" (UniqueName: \"kubernetes.io/projected/e8e05aaf-26e9-4787-aec7-8fc6de68684d-kube-api-access-mczdw\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.133962 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8e05aaf-26e9-4787-aec7-8fc6de68684d-logs\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.134029 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e8e05aaf-26e9-4787-aec7-8fc6de68684d-httpd-run\") pod \"glance-default-external-api-0\" (UID: 
\"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.134140 4938 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.134151 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.134160 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.134171 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b0bf43ab-ac15-4d14-805b-261acabed1f6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.136059 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-scripts\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.136071 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8e05aaf-26e9-4787-aec7-8fc6de68684d-logs\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.136429 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e8e05aaf-26e9-4787-aec7-8fc6de68684d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.138314 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.142036 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-hwg8q"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.147684 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-hwg8q"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.158385 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-config-data\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.159778 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68d465898c-vtz2k"] Nov 22 10:57:30 crc kubenswrapper[4938]: 
I1122 10:57:29.161666 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mczdw\" (UniqueName: \"kubernetes.io/projected/e8e05aaf-26e9-4787-aec7-8fc6de68684d-kube-api-access-mczdw\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.184482 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.273690 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.288674 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.290832 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.296440 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.316358 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.439964 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b538ad6-8101-49ba-80c7-084031916ee4-logs\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.440001 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.440041 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.440062 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b538ad6-8101-49ba-80c7-084031916ee4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.440107 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpts8\" (UniqueName: \"kubernetes.io/projected/5b538ad6-8101-49ba-80c7-084031916ee4-kube-api-access-gpts8\") pod \"glance-default-internal-api-0\" (UID: 
\"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.440146 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.440187 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.541413 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b538ad6-8101-49ba-80c7-084031916ee4-logs\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.541764 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.542149 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b538ad6-8101-49ba-80c7-084031916ee4-logs\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.542578 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.542619 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b538ad6-8101-49ba-80c7-084031916ee4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.542673 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpts8\" (UniqueName: \"kubernetes.io/projected/5b538ad6-8101-49ba-80c7-084031916ee4-kube-api-access-gpts8\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.542714 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc 
kubenswrapper[4938]: I1122 10:57:29.542761 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.543504 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b538ad6-8101-49ba-80c7-084031916ee4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.543970 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.547767 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.548731 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.549541 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.567542 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpts8\" (UniqueName: \"kubernetes.io/projected/5b538ad6-8101-49ba-80c7-084031916ee4-kube-api-access-gpts8\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.581535 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.775808 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.837110 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vs774" event={"ID":"973da40a-c63d-4e06-8750-c3d31d8b0abb","Type":"ContainerStarted","Data":"be112e98bff5a6bd83dc0f676fbf80e72d637c0ea9b016b10849674e13e43347"} Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.842784 4938 generic.go:334] "Generic (PLEG): container finished" podID="e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336" containerID="85f1cf00e7c02a9ff05407ed799d2f62dd7c2e96c53956e881101353f6eb11b3" exitCode=0 Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.843037 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-kxspj" event={"ID":"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336","Type":"ContainerDied","Data":"85f1cf00e7c02a9ff05407ed799d2f62dd7c2e96c53956e881101353f6eb11b3"} Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.843067 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-kxspj" event={"ID":"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336","Type":"ContainerStarted","Data":"5e84722049a612e978d7035614c6b7594701e21aac0bc64a2326b6dc5d64496f"} Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.858233 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-ddndr" event={"ID":"745ffa75-881b-4c0d-8f61-70d872617409","Type":"ContainerStarted","Data":"4c6e9b7ec1e60c13244b991088d3fcebeb5a623a6763d3c2a2adab48dd968cf8"} Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.872209 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-qd8nf" event={"ID":"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146","Type":"ContainerStarted","Data":"8bac59801cf21b8548fe8306bf06f75f3d14cd3e9f59ae1887aa1b45baada354"} Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.872265 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-qd8nf" event={"ID":"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146","Type":"ContainerStarted","Data":"cc09d69a9d593c605f3c808232aa39dd747fa963bc22646b850adf78c2326a4b"} Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.891034 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dwqmw" event={"ID":"6d9c56e4-5398-4e98-b6d7-564bb36e8e95","Type":"ContainerStarted","Data":"68dca4e446261896aa331ae54d57f9ea910e2aa585ce6d395866cc115f6957f1"} Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.909376 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-qd8nf" podStartSLOduration=2.909355266 podStartE2EDuration="2.909355266s" podCreationTimestamp="2025-11-22 10:57:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:29.895003026 +0000 UTC m=+1182.362840425" watchObservedRunningTime="2025-11-22 10:57:29.909355266 +0000 UTC m=+1182.377192685" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.916938 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68d465898c-vtz2k" event={"ID":"f42b1674-4892-4907-a0e8-5aab697ff7aa","Type":"ContainerStarted","Data":"d38ae130a34752bea20aa169908560ca0279e935b5161f835d0ea662f67bb400"} Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.920034 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-dwqmw" 
podStartSLOduration=3.920017423 podStartE2EDuration="3.920017423s" podCreationTimestamp="2025-11-22 10:57:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:29.917441708 +0000 UTC m=+1182.385279107" watchObservedRunningTime="2025-11-22 10:57:29.920017423 +0000 UTC m=+1182.387854822" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.928491 4938 generic.go:334] "Generic (PLEG): container finished" podID="8619f69b-8834-462f-acad-d6d566273988" containerID="250e62f5876766d7b6dd5bd4fad760ab8ee73e3aa21d3937787fea1752d60a80" exitCode=0 Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.928581 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-bdsk2" event={"ID":"8619f69b-8834-462f-acad-d6d566273988","Type":"ContainerDied","Data":"250e62f5876766d7b6dd5bd4fad760ab8ee73e3aa21d3937787fea1752d60a80"} Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:29.934312 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6df769749-fdrcj" event={"ID":"46d9b0d4-89b4-46b5-b803-b840e07ed7fe","Type":"ContainerStarted","Data":"0c814b3fbcd7c78194c29dc407a38ff2add1868d0b2951ec11f51249db2f85a9"} Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.327696 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.384957 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68d465898c-vtz2k"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.398691 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-84db767bd9-qw4kz"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.400310 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.427201 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-84db767bd9-qw4kz"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.465084 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/55be79ce-3816-4644-9e33-3762615249e3-config-data\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.465437 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55be79ce-3816-4644-9e33-3762615249e3-logs\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.465501 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc555\" (UniqueName: \"kubernetes.io/projected/55be79ce-3816-4644-9e33-3762615249e3-kube-api-access-vc555\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.465524 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/55be79ce-3816-4644-9e33-3762615249e3-horizon-secret-key\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.465620 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/55be79ce-3816-4644-9e33-3762615249e3-scripts\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.494559 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0bf43ab-ac15-4d14-805b-261acabed1f6" path="/var/lib/kubelet/pods/b0bf43ab-ac15-4d14-805b-261acabed1f6/volumes" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.495900 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.557899 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.580603 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/55be79ce-3816-4644-9e33-3762615249e3-scripts\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.580737 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/55be79ce-3816-4644-9e33-3762615249e3-config-data\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: 
I1122 10:57:30.580758 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55be79ce-3816-4644-9e33-3762615249e3-logs\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.580861 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc555\" (UniqueName: \"kubernetes.io/projected/55be79ce-3816-4644-9e33-3762615249e3-kube-api-access-vc555\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.580889 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/55be79ce-3816-4644-9e33-3762615249e3-horizon-secret-key\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.585005 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/55be79ce-3816-4644-9e33-3762615249e3-config-data\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.586530 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/55be79ce-3816-4644-9e33-3762615249e3-horizon-secret-key\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.587359 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/55be79ce-3816-4644-9e33-3762615249e3-scripts\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.590996 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55be79ce-3816-4644-9e33-3762615249e3-logs\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.606609 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-p9nkx"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.614544 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc555\" (UniqueName: \"kubernetes.io/projected/55be79ce-3816-4644-9e33-3762615249e3-kube-api-access-vc555\") pod \"horizon-84db767bd9-qw4kz\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.743757 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.744582 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.752959 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.753172 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.758761 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-mf78l"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.809046 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-dns-swift-storage-0\") pod \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.809098 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-dns-svc\") pod \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.809123 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-ovsdbserver-nb\") pod \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.809192 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-dns-svc\") pod \"8619f69b-8834-462f-acad-d6d566273988\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.809238 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5w4v6\" (UniqueName: \"kubernetes.io/projected/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-kube-api-access-5w4v6\") pod \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.809298 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-dns-swift-storage-0\") pod \"8619f69b-8834-462f-acad-d6d566273988\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.809331 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-config\") pod \"8619f69b-8834-462f-acad-d6d566273988\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.809406 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-ovsdbserver-sb\") pod \"8619f69b-8834-462f-acad-d6d566273988\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.809432 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-ovsdbserver-nb\") pod \"8619f69b-8834-462f-acad-d6d566273988\" (UID: 
\"8619f69b-8834-462f-acad-d6d566273988\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.809485 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trmhj\" (UniqueName: \"kubernetes.io/projected/8619f69b-8834-462f-acad-d6d566273988-kube-api-access-trmhj\") pod \"8619f69b-8834-462f-acad-d6d566273988\" (UID: \"8619f69b-8834-462f-acad-d6d566273988\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.809551 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-config\") pod \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.809575 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-ovsdbserver-sb\") pod \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\" (UID: \"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336\") " Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.819017 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.836683 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-kube-api-access-5w4v6" (OuterVolumeSpecName: "kube-api-access-5w4v6") pod "e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336" (UID: "e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336"). InnerVolumeSpecName "kube-api-access-5w4v6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.844715 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8619f69b-8834-462f-acad-d6d566273988-kube-api-access-trmhj" (OuterVolumeSpecName: "kube-api-access-trmhj") pod "8619f69b-8834-462f-acad-d6d566273988" (UID: "8619f69b-8834-462f-acad-d6d566273988"). InnerVolumeSpecName "kube-api-access-trmhj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.879698 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336" (UID: "e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.895633 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8619f69b-8834-462f-acad-d6d566273988" (UID: "8619f69b-8834-462f-acad-d6d566273988"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.904945 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8619f69b-8834-462f-acad-d6d566273988" (UID: "8619f69b-8834-462f-acad-d6d566273988"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.905438 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8619f69b-8834-462f-acad-d6d566273988" (UID: "8619f69b-8834-462f-acad-d6d566273988"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.909158 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336" (UID: "e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.912128 4938 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.912164 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.912178 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.912192 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5w4v6\" (UniqueName: \"kubernetes.io/projected/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-kube-api-access-5w4v6\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.912206 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.912218 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.912230 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trmhj\" (UniqueName: \"kubernetes.io/projected/8619f69b-8834-462f-acad-d6d566273988-kube-api-access-trmhj\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.915984 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-config" (OuterVolumeSpecName: "config") pod "8619f69b-8834-462f-acad-d6d566273988" (UID: "8619f69b-8834-462f-acad-d6d566273988"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.917768 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.918551 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8619f69b-8834-462f-acad-d6d566273988" (UID: "8619f69b-8834-462f-acad-d6d566273988"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.922063 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-config" (OuterVolumeSpecName: "config") pod "e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336" (UID: "e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.924441 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336" (UID: "e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.939401 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336" (UID: "e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.988701 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-kxspj" event={"ID":"e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336","Type":"ContainerDied","Data":"5e84722049a612e978d7035614c6b7594701e21aac0bc64a2326b6dc5d64496f"} Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.988760 4938 scope.go:117] "RemoveContainer" containerID="85f1cf00e7c02a9ff05407ed799d2f62dd7c2e96c53956e881101353f6eb11b3" Nov 22 10:57:30 crc kubenswrapper[4938]: I1122 10:57:30.988872 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-kxspj" Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:30.998969 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-p9nkx" event={"ID":"c795cb12-352a-40bf-b828-4c4d16472eea","Type":"ContainerStarted","Data":"7e90d233a457e16fa0f62af0461afc3af36605a6de0cb371a90025c5aafe6c3b"} Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.015187 4938 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.015235 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8619f69b-8834-462f-acad-d6d566273988-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.015244 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.015253 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.015261 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.016112 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-bdsk2" event={"ID":"8619f69b-8834-462f-acad-d6d566273988","Type":"ContainerDied","Data":"5ad811d9978336676d6c963c5d13b21d3d5feddce65ace54e24ec3167eb4443e"} Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.016266 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-bdsk2" Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.026035 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d4e750b6-f8d3-4603-8c8f-bf13f11e079e","Type":"ContainerStarted","Data":"7af5990fa7862e6bc8edb36ee82d59949c5f752190f1ef9b79e02ffebc6dea3e"} Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.038467 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5b538ad6-8101-49ba-80c7-084031916ee4","Type":"ContainerStarted","Data":"01e60cc95fa9fad5cd234bdab595c0f76a69e1b0ccdb459e5e534883215037dc"} Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.048611 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" event={"ID":"a071ebb7-d74e-4b58-b01d-e20eaf91150e","Type":"ContainerStarted","Data":"303e28e9c6fbe9ffce079bd051d6d96612aba4640bf2555235b7a6a12ffb5878"} Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.077137 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-kxspj"] Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.102068 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-kxspj"] Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.112811 4938 scope.go:117] "RemoveContainer" containerID="250e62f5876766d7b6dd5bd4fad760ab8ee73e3aa21d3937787fea1752d60a80" Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.134595 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-bdsk2"] Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.141773 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-bdsk2"] Nov 22 10:57:31 crc kubenswrapper[4938]: I1122 10:57:31.538216 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-84db767bd9-qw4kz"] Nov 22 10:57:31 crc kubenswrapper[4938]: W1122 10:57:31.556187 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55be79ce_3816_4644_9e33_3762615249e3.slice/crio-7d246379168f85ddf55c6d664d0c8f0547674b53b4cd82b40dcda7e50fd3a0bc WatchSource:0}: Error finding container 7d246379168f85ddf55c6d664d0c8f0547674b53b4cd82b40dcda7e50fd3a0bc: Status 404 returned error can't find the container with id 7d246379168f85ddf55c6d664d0c8f0547674b53b4cd82b40dcda7e50fd3a0bc Nov 22 10:57:32 crc kubenswrapper[4938]: I1122 10:57:32.059591 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e8e05aaf-26e9-4787-aec7-8fc6de68684d","Type":"ContainerStarted","Data":"65a718e088eb4bff4cf4693c593480973407be925295c6e73e015c1494b3f129"} Nov 22 10:57:32 crc kubenswrapper[4938]: I1122 10:57:32.069333 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84db767bd9-qw4kz" event={"ID":"55be79ce-3816-4644-9e33-3762615249e3","Type":"ContainerStarted","Data":"7d246379168f85ddf55c6d664d0c8f0547674b53b4cd82b40dcda7e50fd3a0bc"} Nov 22 10:57:32 crc kubenswrapper[4938]: I1122 10:57:32.073204 4938 generic.go:334] "Generic (PLEG): container finished" podID="a071ebb7-d74e-4b58-b01d-e20eaf91150e" containerID="19e49d629c963760cfb9528147cb5832258b58cada7d29c23167c9df7a0842b6" exitCode=0 Nov 22 10:57:32 crc kubenswrapper[4938]: I1122 10:57:32.073269 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" event={"ID":"a071ebb7-d74e-4b58-b01d-e20eaf91150e","Type":"ContainerDied","Data":"19e49d629c963760cfb9528147cb5832258b58cada7d29c23167c9df7a0842b6"} Nov 22 10:57:32 crc kubenswrapper[4938]: I1122 10:57:32.489179 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8619f69b-8834-462f-acad-d6d566273988" path="/var/lib/kubelet/pods/8619f69b-8834-462f-acad-d6d566273988/volumes" Nov 22 10:57:32 crc kubenswrapper[4938]: I1122 10:57:32.491042 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336" path="/var/lib/kubelet/pods/e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336/volumes" Nov 22 10:57:33 crc kubenswrapper[4938]: I1122 10:57:33.097102 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5b538ad6-8101-49ba-80c7-084031916ee4","Type":"ContainerStarted","Data":"91c799c9703e2f0c0a18f8042b46b1a1ad6838dfa07cc680cfd63ce909a7b145"} Nov 22 10:57:33 crc kubenswrapper[4938]: I1122 10:57:33.101710 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" event={"ID":"a071ebb7-d74e-4b58-b01d-e20eaf91150e","Type":"ContainerStarted","Data":"57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716"} Nov 22 10:57:33 crc kubenswrapper[4938]: I1122 10:57:33.102743 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:33 crc kubenswrapper[4938]: I1122 10:57:33.109691 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e8e05aaf-26e9-4787-aec7-8fc6de68684d","Type":"ContainerStarted","Data":"bed7ae043c6fb20d75d5daceb68f38992ec2afc28447d8d23f2312188ad29a9a"} Nov 22 10:57:33 crc kubenswrapper[4938]: I1122 10:57:33.129393 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" podStartSLOduration=5.129375393 podStartE2EDuration="5.129375393s" podCreationTimestamp="2025-11-22 10:57:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:33.124335677 +0000 UTC m=+1185.592173076" watchObservedRunningTime="2025-11-22 10:57:33.129375393 +0000 UTC m=+1185.597212792" Nov 22 10:57:34 crc kubenswrapper[4938]: I1122 10:57:34.129661 4938 generic.go:334] "Generic (PLEG): container finished" podID="6d9c56e4-5398-4e98-b6d7-564bb36e8e95" containerID="68dca4e446261896aa331ae54d57f9ea910e2aa585ce6d395866cc115f6957f1" exitCode=0 Nov 22 10:57:34 crc kubenswrapper[4938]: I1122 10:57:34.129841 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dwqmw" event={"ID":"6d9c56e4-5398-4e98-b6d7-564bb36e8e95","Type":"ContainerDied","Data":"68dca4e446261896aa331ae54d57f9ea910e2aa585ce6d395866cc115f6957f1"} Nov 22 10:57:34 crc kubenswrapper[4938]: I1122 10:57:34.134573 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="e8e05aaf-26e9-4787-aec7-8fc6de68684d" containerName="glance-log" containerID="cri-o://bed7ae043c6fb20d75d5daceb68f38992ec2afc28447d8d23f2312188ad29a9a" gracePeriod=30 Nov 22 10:57:34 crc kubenswrapper[4938]: I1122 10:57:34.134759 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"e8e05aaf-26e9-4787-aec7-8fc6de68684d","Type":"ContainerStarted","Data":"6e908aa00e34cab9366382c70382b3abb605ef9615bfb22effc4180f4f38a939"} Nov 22 10:57:34 crc kubenswrapper[4938]: I1122 10:57:34.134831 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="e8e05aaf-26e9-4787-aec7-8fc6de68684d" containerName="glance-httpd" containerID="cri-o://6e908aa00e34cab9366382c70382b3abb605ef9615bfb22effc4180f4f38a939" gracePeriod=30 Nov 22 10:57:34 crc kubenswrapper[4938]: I1122 10:57:34.143081 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5b538ad6-8101-49ba-80c7-084031916ee4" containerName="glance-log" containerID="cri-o://91c799c9703e2f0c0a18f8042b46b1a1ad6838dfa07cc680cfd63ce909a7b145" gracePeriod=30 Nov 22 10:57:34 crc kubenswrapper[4938]: I1122 10:57:34.143179 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5b538ad6-8101-49ba-80c7-084031916ee4","Type":"ContainerStarted","Data":"3a52fccb7fc4e48d69bc647a9429f24ecfaa0e7e36998ed6819f0c3e0044cbdd"} Nov 22 10:57:34 crc kubenswrapper[4938]: I1122 10:57:34.143239 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5b538ad6-8101-49ba-80c7-084031916ee4" containerName="glance-httpd" containerID="cri-o://3a52fccb7fc4e48d69bc647a9429f24ecfaa0e7e36998ed6819f0c3e0044cbdd" gracePeriod=30 Nov 22 10:57:34 crc kubenswrapper[4938]: I1122 10:57:34.174677 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.174540803 podStartE2EDuration="6.174540803s" podCreationTimestamp="2025-11-22 10:57:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:34.169686212 +0000 UTC m=+1186.637523611" watchObservedRunningTime="2025-11-22 10:57:34.174540803 +0000 UTC m=+1186.642378212" Nov 22 10:57:34 crc kubenswrapper[4938]: I1122 10:57:34.190694 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.190679167 podStartE2EDuration="7.190679167s" podCreationTimestamp="2025-11-22 10:57:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:34.188467002 +0000 UTC m=+1186.656304401" watchObservedRunningTime="2025-11-22 10:57:34.190679167 +0000 UTC m=+1186.658516566" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.157505 4938 generic.go:334] "Generic (PLEG): container finished" podID="e8e05aaf-26e9-4787-aec7-8fc6de68684d" containerID="bed7ae043c6fb20d75d5daceb68f38992ec2afc28447d8d23f2312188ad29a9a" exitCode=143 Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.157741 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e8e05aaf-26e9-4787-aec7-8fc6de68684d","Type":"ContainerDied","Data":"bed7ae043c6fb20d75d5daceb68f38992ec2afc28447d8d23f2312188ad29a9a"} Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.161109 4938 generic.go:334] "Generic (PLEG): container finished" podID="5b538ad6-8101-49ba-80c7-084031916ee4" containerID="3a52fccb7fc4e48d69bc647a9429f24ecfaa0e7e36998ed6819f0c3e0044cbdd" exitCode=0 Nov 22 10:57:35 crc 
kubenswrapper[4938]: I1122 10:57:35.161137 4938 generic.go:334] "Generic (PLEG): container finished" podID="5b538ad6-8101-49ba-80c7-084031916ee4" containerID="91c799c9703e2f0c0a18f8042b46b1a1ad6838dfa07cc680cfd63ce909a7b145" exitCode=143 Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.161206 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5b538ad6-8101-49ba-80c7-084031916ee4","Type":"ContainerDied","Data":"3a52fccb7fc4e48d69bc647a9429f24ecfaa0e7e36998ed6819f0c3e0044cbdd"} Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.161251 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5b538ad6-8101-49ba-80c7-084031916ee4","Type":"ContainerDied","Data":"91c799c9703e2f0c0a18f8042b46b1a1ad6838dfa07cc680cfd63ce909a7b145"} Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.498221 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.601135 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-combined-ca-bundle\") pod \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.601211 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-fernet-keys\") pod \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.601305 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4xxm\" (UniqueName: \"kubernetes.io/projected/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-kube-api-access-q4xxm\") pod \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.601476 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-scripts\") pod \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.601543 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-config-data\") pod \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.601602 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-credential-keys\") pod \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\" (UID: \"6d9c56e4-5398-4e98-b6d7-564bb36e8e95\") " Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.646140 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-scripts" (OuterVolumeSpecName: "scripts") pod "6d9c56e4-5398-4e98-b6d7-564bb36e8e95" (UID: "6d9c56e4-5398-4e98-b6d7-564bb36e8e95"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.646214 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6d9c56e4-5398-4e98-b6d7-564bb36e8e95" (UID: "6d9c56e4-5398-4e98-b6d7-564bb36e8e95"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.646368 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6d9c56e4-5398-4e98-b6d7-564bb36e8e95" (UID: "6d9c56e4-5398-4e98-b6d7-564bb36e8e95"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.662069 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-kube-api-access-q4xxm" (OuterVolumeSpecName: "kube-api-access-q4xxm") pod "6d9c56e4-5398-4e98-b6d7-564bb36e8e95" (UID: "6d9c56e4-5398-4e98-b6d7-564bb36e8e95"). InnerVolumeSpecName "kube-api-access-q4xxm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.666049 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d9c56e4-5398-4e98-b6d7-564bb36e8e95" (UID: "6d9c56e4-5398-4e98-b6d7-564bb36e8e95"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.667875 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-config-data" (OuterVolumeSpecName: "config-data") pod "6d9c56e4-5398-4e98-b6d7-564bb36e8e95" (UID: "6d9c56e4-5398-4e98-b6d7-564bb36e8e95"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.704606 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.704648 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.704661 4938 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.704675 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.704688 4938 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:35 crc kubenswrapper[4938]: I1122 10:57:35.704699 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4xxm\" (UniqueName: \"kubernetes.io/projected/6d9c56e4-5398-4e98-b6d7-564bb36e8e95-kube-api-access-q4xxm\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.177225 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dwqmw" event={"ID":"6d9c56e4-5398-4e98-b6d7-564bb36e8e95","Type":"ContainerDied","Data":"88cf208d6160243e4fa50ae6a5cb658d3dbe0798a3eac7566fdb3db249587191"} Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.177466 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88cf208d6160243e4fa50ae6a5cb658d3dbe0798a3eac7566fdb3db249587191" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.177252 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-dwqmw" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.179429 4938 generic.go:334] "Generic (PLEG): container finished" podID="e8e05aaf-26e9-4787-aec7-8fc6de68684d" containerID="6e908aa00e34cab9366382c70382b3abb605ef9615bfb22effc4180f4f38a939" exitCode=0 Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.179454 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e8e05aaf-26e9-4787-aec7-8fc6de68684d","Type":"ContainerDied","Data":"6e908aa00e34cab9366382c70382b3abb605ef9615bfb22effc4180f4f38a939"} Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.266348 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-dwqmw"] Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.273306 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-dwqmw"] Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.351020 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-psj8d"] Nov 22 10:57:36 crc kubenswrapper[4938]: E1122 10:57:36.351381 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d9c56e4-5398-4e98-b6d7-564bb36e8e95" containerName="keystone-bootstrap" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.351398 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d9c56e4-5398-4e98-b6d7-564bb36e8e95" containerName="keystone-bootstrap" Nov 22 10:57:36 crc kubenswrapper[4938]: E1122 10:57:36.351412 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336" containerName="init" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.351418 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336" containerName="init" Nov 22 10:57:36 crc kubenswrapper[4938]: E1122 10:57:36.351428 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8619f69b-8834-462f-acad-d6d566273988" containerName="init" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.351434 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8619f69b-8834-462f-acad-d6d566273988" containerName="init" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.351600 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8619f69b-8834-462f-acad-d6d566273988" containerName="init" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.351616 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d9c56e4-5398-4e98-b6d7-564bb36e8e95" containerName="keystone-bootstrap" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.351634 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0b7fdd3-fa8e-4ebe-a8e9-424409b5b336" containerName="init" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.352193 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.357853 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.358142 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.358301 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-ct79k" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.360726 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.363075 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-psj8d"] Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.421638 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-scripts\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.421735 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-config-data\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.424550 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-combined-ca-bundle\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.424806 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm8dr\" (UniqueName: \"kubernetes.io/projected/e4217bc8-0afd-4d83-9e43-41d4adf355df-kube-api-access-fm8dr\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.425020 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-fernet-keys\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.425077 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-credential-keys\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.457899 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d9c56e4-5398-4e98-b6d7-564bb36e8e95" path="/var/lib/kubelet/pods/6d9c56e4-5398-4e98-b6d7-564bb36e8e95/volumes" Nov 22 10:57:36 crc 
kubenswrapper[4938]: I1122 10:57:36.526563 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm8dr\" (UniqueName: \"kubernetes.io/projected/e4217bc8-0afd-4d83-9e43-41d4adf355df-kube-api-access-fm8dr\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.526623 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-fernet-keys\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.526665 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-credential-keys\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.526717 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-scripts\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.526758 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-config-data\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.526806 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-combined-ca-bundle\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.534616 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-scripts\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.534831 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-fernet-keys\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.538236 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-combined-ca-bundle\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.539879 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-config-data\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.543378 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-credential-keys\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.544169 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm8dr\" (UniqueName: \"kubernetes.io/projected/e4217bc8-0afd-4d83-9e43-41d4adf355df-kube-api-access-fm8dr\") pod \"keystone-bootstrap-psj8d\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:36 crc kubenswrapper[4938]: I1122 10:57:36.678469 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.746378 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.754814 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.848869 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-scripts\") pod \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849215 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mczdw\" (UniqueName: \"kubernetes.io/projected/e8e05aaf-26e9-4787-aec7-8fc6de68684d-kube-api-access-mczdw\") pod \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849267 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b538ad6-8101-49ba-80c7-084031916ee4-logs\") pod \"5b538ad6-8101-49ba-80c7-084031916ee4\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849348 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-scripts\") pod \"5b538ad6-8101-49ba-80c7-084031916ee4\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849385 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-combined-ca-bundle\") pod \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849419 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpts8\" (UniqueName: \"kubernetes.io/projected/5b538ad6-8101-49ba-80c7-084031916ee4-kube-api-access-gpts8\") pod 
\"5b538ad6-8101-49ba-80c7-084031916ee4\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849462 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8e05aaf-26e9-4787-aec7-8fc6de68684d-logs\") pod \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849496 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e8e05aaf-26e9-4787-aec7-8fc6de68684d-httpd-run\") pod \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849534 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-config-data\") pod \"5b538ad6-8101-49ba-80c7-084031916ee4\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849562 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"5b538ad6-8101-49ba-80c7-084031916ee4\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849605 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-config-data\") pod \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849629 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-combined-ca-bundle\") pod \"5b538ad6-8101-49ba-80c7-084031916ee4\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849647 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b538ad6-8101-49ba-80c7-084031916ee4-httpd-run\") pod \"5b538ad6-8101-49ba-80c7-084031916ee4\" (UID: \"5b538ad6-8101-49ba-80c7-084031916ee4\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849693 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\" (UID: \"e8e05aaf-26e9-4787-aec7-8fc6de68684d\") " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849862 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8e05aaf-26e9-4787-aec7-8fc6de68684d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e8e05aaf-26e9-4787-aec7-8fc6de68684d" (UID: "e8e05aaf-26e9-4787-aec7-8fc6de68684d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.849898 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8e05aaf-26e9-4787-aec7-8fc6de68684d-logs" (OuterVolumeSpecName: "logs") pod "e8e05aaf-26e9-4787-aec7-8fc6de68684d" (UID: "e8e05aaf-26e9-4787-aec7-8fc6de68684d"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.850146 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b538ad6-8101-49ba-80c7-084031916ee4-logs" (OuterVolumeSpecName: "logs") pod "5b538ad6-8101-49ba-80c7-084031916ee4" (UID: "5b538ad6-8101-49ba-80c7-084031916ee4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.850253 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8e05aaf-26e9-4787-aec7-8fc6de68684d-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.850271 4938 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e8e05aaf-26e9-4787-aec7-8fc6de68684d-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.850292 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b538ad6-8101-49ba-80c7-084031916ee4-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5b538ad6-8101-49ba-80c7-084031916ee4" (UID: "5b538ad6-8101-49ba-80c7-084031916ee4"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.857080 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-scripts" (OuterVolumeSpecName: "scripts") pod "e8e05aaf-26e9-4787-aec7-8fc6de68684d" (UID: "e8e05aaf-26e9-4787-aec7-8fc6de68684d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.857095 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "e8e05aaf-26e9-4787-aec7-8fc6de68684d" (UID: "e8e05aaf-26e9-4787-aec7-8fc6de68684d"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.857072 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "5b538ad6-8101-49ba-80c7-084031916ee4" (UID: "5b538ad6-8101-49ba-80c7-084031916ee4"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.857081 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b538ad6-8101-49ba-80c7-084031916ee4-kube-api-access-gpts8" (OuterVolumeSpecName: "kube-api-access-gpts8") pod "5b538ad6-8101-49ba-80c7-084031916ee4" (UID: "5b538ad6-8101-49ba-80c7-084031916ee4"). InnerVolumeSpecName "kube-api-access-gpts8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.857138 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8e05aaf-26e9-4787-aec7-8fc6de68684d-kube-api-access-mczdw" (OuterVolumeSpecName: "kube-api-access-mczdw") pod "e8e05aaf-26e9-4787-aec7-8fc6de68684d" (UID: "e8e05aaf-26e9-4787-aec7-8fc6de68684d"). InnerVolumeSpecName "kube-api-access-mczdw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.864523 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-scripts" (OuterVolumeSpecName: "scripts") pod "5b538ad6-8101-49ba-80c7-084031916ee4" (UID: "5b538ad6-8101-49ba-80c7-084031916ee4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.882147 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e8e05aaf-26e9-4787-aec7-8fc6de68684d" (UID: "e8e05aaf-26e9-4787-aec7-8fc6de68684d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.891660 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b538ad6-8101-49ba-80c7-084031916ee4" (UID: "5b538ad6-8101-49ba-80c7-084031916ee4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.905109 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-config-data" (OuterVolumeSpecName: "config-data") pod "5b538ad6-8101-49ba-80c7-084031916ee4" (UID: "5b538ad6-8101-49ba-80c7-084031916ee4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.915797 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-config-data" (OuterVolumeSpecName: "config-data") pod "e8e05aaf-26e9-4787-aec7-8fc6de68684d" (UID: "e8e05aaf-26e9-4787-aec7-8fc6de68684d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.952326 4938 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.952359 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.952368 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.952379 4938 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b538ad6-8101-49ba-80c7-084031916ee4-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.952391 4938 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.952400 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.952408 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mczdw\" (UniqueName: \"kubernetes.io/projected/e8e05aaf-26e9-4787-aec7-8fc6de68684d-kube-api-access-mczdw\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.952417 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b538ad6-8101-49ba-80c7-084031916ee4-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.952424 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.952431 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8e05aaf-26e9-4787-aec7-8fc6de68684d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.952440 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpts8\" (UniqueName: \"kubernetes.io/projected/5b538ad6-8101-49ba-80c7-084031916ee4-kube-api-access-gpts8\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.952448 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b538ad6-8101-49ba-80c7-084031916ee4-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.979176 4938 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 22 10:57:37 crc kubenswrapper[4938]: I1122 10:57:37.980726 4938 operation_generator.go:917] UnmountDevice succeeded for 
volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.053568 4938 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.053600 4938 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.201421 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.201413 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5b538ad6-8101-49ba-80c7-084031916ee4","Type":"ContainerDied","Data":"01e60cc95fa9fad5cd234bdab595c0f76a69e1b0ccdb459e5e534883215037dc"} Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.201569 4938 scope.go:117] "RemoveContainer" containerID="3a52fccb7fc4e48d69bc647a9429f24ecfaa0e7e36998ed6819f0c3e0044cbdd" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.203406 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e8e05aaf-26e9-4787-aec7-8fc6de68684d","Type":"ContainerDied","Data":"65a718e088eb4bff4cf4693c593480973407be925295c6e73e015c1494b3f129"} Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.203497 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.238106 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.247844 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.256136 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.266057 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.277091 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:38 crc kubenswrapper[4938]: E1122 10:57:38.277548 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b538ad6-8101-49ba-80c7-084031916ee4" containerName="glance-httpd" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.277565 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b538ad6-8101-49ba-80c7-084031916ee4" containerName="glance-httpd" Nov 22 10:57:38 crc kubenswrapper[4938]: E1122 10:57:38.277616 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8e05aaf-26e9-4787-aec7-8fc6de68684d" containerName="glance-httpd" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.277622 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8e05aaf-26e9-4787-aec7-8fc6de68684d" containerName="glance-httpd" Nov 22 10:57:38 crc kubenswrapper[4938]: E1122 10:57:38.277637 4938 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="e8e05aaf-26e9-4787-aec7-8fc6de68684d" containerName="glance-log" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.277644 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8e05aaf-26e9-4787-aec7-8fc6de68684d" containerName="glance-log" Nov 22 10:57:38 crc kubenswrapper[4938]: E1122 10:57:38.277653 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b538ad6-8101-49ba-80c7-084031916ee4" containerName="glance-log" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.277658 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b538ad6-8101-49ba-80c7-084031916ee4" containerName="glance-log" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.278156 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b538ad6-8101-49ba-80c7-084031916ee4" containerName="glance-log" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.278208 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8e05aaf-26e9-4787-aec7-8fc6de68684d" containerName="glance-httpd" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.278238 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b538ad6-8101-49ba-80c7-084031916ee4" containerName="glance-httpd" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.278248 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8e05aaf-26e9-4787-aec7-8fc6de68684d" containerName="glance-log" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.279426 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.282878 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.283201 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-c5zth" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.283369 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.288097 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.295866 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.297858 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.300415 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.306790 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.361096 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-scripts\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.361139 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/94dceae5-9197-4c0e-98e8-6d85b9576f9f-logs\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.361163 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.361608 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.361688 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/94dceae5-9197-4c0e-98e8-6d85b9576f9f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.361806 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.362167 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.362310 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5a3a928-92b6-4895-bea7-cec4b72d5f37-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " 
pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.362372 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmgc2\" (UniqueName: \"kubernetes.io/projected/f5a3a928-92b6-4895-bea7-cec4b72d5f37-kube-api-access-dmgc2\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.362662 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5a3a928-92b6-4895-bea7-cec4b72d5f37-logs\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.362704 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckddr\" (UniqueName: \"kubernetes.io/projected/94dceae5-9197-4c0e-98e8-6d85b9576f9f-kube-api-access-ckddr\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.362763 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.362859 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.362899 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-config-data\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.465117 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.465840 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/94dceae5-9197-4c0e-98e8-6d85b9576f9f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.465287 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: 
\"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.466844 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/94dceae5-9197-4c0e-98e8-6d85b9576f9f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.467448 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b538ad6-8101-49ba-80c7-084031916ee4" path="/var/lib/kubelet/pods/5b538ad6-8101-49ba-80c7-084031916ee4/volumes" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.467972 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.468050 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.468100 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5a3a928-92b6-4895-bea7-cec4b72d5f37-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.468132 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmgc2\" (UniqueName: \"kubernetes.io/projected/f5a3a928-92b6-4895-bea7-cec4b72d5f37-kube-api-access-dmgc2\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.468231 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8e05aaf-26e9-4787-aec7-8fc6de68684d" path="/var/lib/kubelet/pods/e8e05aaf-26e9-4787-aec7-8fc6de68684d/volumes" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.468315 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5a3a928-92b6-4895-bea7-cec4b72d5f37-logs\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.468363 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckddr\" (UniqueName: \"kubernetes.io/projected/94dceae5-9197-4c0e-98e8-6d85b9576f9f-kube-api-access-ckddr\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.468802 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.468862 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.468895 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-config-data\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.468969 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-scripts\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.469050 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/94dceae5-9197-4c0e-98e8-6d85b9576f9f-logs\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.469083 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.469270 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.473846 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5a3a928-92b6-4895-bea7-cec4b72d5f37-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.473896 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.474167 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5a3a928-92b6-4895-bea7-cec4b72d5f37-logs\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.475345 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"logs\" (UniqueName: \"kubernetes.io/empty-dir/94dceae5-9197-4c0e-98e8-6d85b9576f9f-logs\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.480566 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.483228 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.484966 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.492498 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-scripts\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.493326 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.494000 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmgc2\" (UniqueName: \"kubernetes.io/projected/f5a3a928-92b6-4895-bea7-cec4b72d5f37-kube-api-access-dmgc2\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.500541 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-config-data\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.503212 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.505118 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckddr\" (UniqueName: \"kubernetes.io/projected/94dceae5-9197-4c0e-98e8-6d85b9576f9f-kube-api-access-ckddr\") pod \"glance-default-internal-api-0\" (UID: 
\"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.516889 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.620716 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.658452 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rsr4t"] Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.658717 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="dnsmasq-dns" containerID="cri-o://a6c5504d4e82ebc60278076f0055b48c5966ef6e35797f69681db5d528f8674d" gracePeriod=10 Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.659046 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:57:38 crc kubenswrapper[4938]: I1122 10:57:38.914343 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6df769749-fdrcj"] Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.005048 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7bb4f8b4bd-qj489"] Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.006560 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.010986 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.052173 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7bb4f8b4bd-qj489"] Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.085992 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.138287 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.189825 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/52d01853-e609-4339-a336-78e1b9f4f704-scripts\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.189893 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/52d01853-e609-4339-a336-78e1b9f4f704-config-data\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.190017 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r5b7\" (UniqueName: 
\"kubernetes.io/projected/52d01853-e609-4339-a336-78e1b9f4f704-kube-api-access-5r5b7\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.190046 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/52d01853-e609-4339-a336-78e1b9f4f704-horizon-secret-key\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.190065 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/52d01853-e609-4339-a336-78e1b9f4f704-horizon-tls-certs\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.190114 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d01853-e609-4339-a336-78e1b9f4f704-combined-ca-bundle\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.190191 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52d01853-e609-4339-a336-78e1b9f4f704-logs\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.215187 4938 generic.go:334] "Generic (PLEG): container finished" podID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerID="a6c5504d4e82ebc60278076f0055b48c5966ef6e35797f69681db5d528f8674d" exitCode=0 Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.215241 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" event={"ID":"878d8a17-a88c-43ae-930e-01a652f87d2b","Type":"ContainerDied","Data":"a6c5504d4e82ebc60278076f0055b48c5966ef6e35797f69681db5d528f8674d"} Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.291340 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/52d01853-e609-4339-a336-78e1b9f4f704-scripts\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.291397 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/52d01853-e609-4339-a336-78e1b9f4f704-config-data\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.291460 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r5b7\" (UniqueName: \"kubernetes.io/projected/52d01853-e609-4339-a336-78e1b9f4f704-kube-api-access-5r5b7\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: 
I1122 10:57:39.291479 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/52d01853-e609-4339-a336-78e1b9f4f704-horizon-secret-key\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.291493 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/52d01853-e609-4339-a336-78e1b9f4f704-horizon-tls-certs\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.291518 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d01853-e609-4339-a336-78e1b9f4f704-combined-ca-bundle\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.291571 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52d01853-e609-4339-a336-78e1b9f4f704-logs\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.292777 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52d01853-e609-4339-a336-78e1b9f4f704-logs\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.294254 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/52d01853-e609-4339-a336-78e1b9f4f704-scripts\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.294850 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/52d01853-e609-4339-a336-78e1b9f4f704-config-data\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.312659 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52d01853-e609-4339-a336-78e1b9f4f704-combined-ca-bundle\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.313242 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/52d01853-e609-4339-a336-78e1b9f4f704-horizon-secret-key\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.314844 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/52d01853-e609-4339-a336-78e1b9f4f704-horizon-tls-certs\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.315745 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r5b7\" (UniqueName: \"kubernetes.io/projected/52d01853-e609-4339-a336-78e1b9f4f704-kube-api-access-5r5b7\") pod \"horizon-7bb4f8b4bd-qj489\" (UID: \"52d01853-e609-4339-a336-78e1b9f4f704\") " pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:39 crc kubenswrapper[4938]: I1122 10:57:39.351217 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:57:40 crc kubenswrapper[4938]: I1122 10:57:40.823469 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Nov 22 10:57:45 crc kubenswrapper[4938]: I1122 10:57:45.823155 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Nov 22 10:57:50 crc kubenswrapper[4938]: I1122 10:57:50.823601 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Nov 22 10:57:50 crc kubenswrapper[4938]: I1122 10:57:50.824192 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:57:55 crc kubenswrapper[4938]: I1122 10:57:55.823810 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Nov 22 10:57:57 crc kubenswrapper[4938]: E1122 10:57:57.223705 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 22 10:57:57 crc kubenswrapper[4938]: E1122 10:57:57.224174 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n684hf5hbbh66fh79h8fh9fh56bh66fh84hb5h556h669h9bhbdh64fh54ch56dh64dh65chcfh66bh6fh584h68h5bh5bh78hbh5c4h655h546q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-29p8b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-6df769749-fdrcj_openstack(46d9b0d4-89b4-46b5-b803-b840e07ed7fe): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:57:57 crc kubenswrapper[4938]: E1122 10:57:57.456963 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-6df769749-fdrcj" podUID="46d9b0d4-89b4-46b5-b803-b840e07ed7fe" Nov 22 10:58:00 crc kubenswrapper[4938]: I1122 10:58:00.823704 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Nov 22 10:58:05 crc kubenswrapper[4938]: E1122 10:58:05.783207 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 22 10:58:05 crc kubenswrapper[4938]: E1122 10:58:05.783763 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n59ch57bhc7h77h68dh94h65fh68h68h65h75h576h695h599hd4h666h5b8h54dh68h56dh676hcdh574h5f9hf8h54bh86h5dfh65hf7h5f8h674q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vc555,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-84db767bd9-qw4kz_openstack(55be79ce-3816-4644-9e33-3762615249e3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:58:05 crc kubenswrapper[4938]: E1122 10:58:05.786183 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-84db767bd9-qw4kz" podUID="55be79ce-3816-4644-9e33-3762615249e3" Nov 22 10:58:05 crc kubenswrapper[4938]: E1122 10:58:05.796950 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 22 10:58:05 crc kubenswrapper[4938]: E1122 10:58:05.797104 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n646h65h74h5d8h58ch557h687h659h59h577h554h5bfh577h555h667hcdh685h5c4h64h68ch56h97h5f5h677h644h57fh655h57dhc8h587h58ch6cq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k65gk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-68d465898c-vtz2k_openstack(f42b1674-4892-4907-a0e8-5aab697ff7aa): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:58:05 crc kubenswrapper[4938]: E1122 10:58:05.799283 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-68d465898c-vtz2k" podUID="f42b1674-4892-4907-a0e8-5aab697ff7aa" Nov 22 10:58:05 crc kubenswrapper[4938]: I1122 10:58:05.823856 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Nov 22 10:58:06 crc kubenswrapper[4938]: E1122 10:58:06.442784 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-84db767bd9-qw4kz" podUID="55be79ce-3816-4644-9e33-3762615249e3" Nov 22 10:58:07 crc kubenswrapper[4938]: I1122 10:58:07.835379 4938 scope.go:117] "RemoveContainer" containerID="91c799c9703e2f0c0a18f8042b46b1a1ad6838dfa07cc680cfd63ce909a7b145" Nov 22 10:58:12 
crc kubenswrapper[4938]: I1122 10:58:12.970048 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.125465 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-config-data\") pod \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.125512 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-logs\") pod \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.125723 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29p8b\" (UniqueName: \"kubernetes.io/projected/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-kube-api-access-29p8b\") pod \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.125748 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-scripts\") pod \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.125793 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-horizon-secret-key\") pod \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\" (UID: \"46d9b0d4-89b4-46b5-b803-b840e07ed7fe\") " Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.127545 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-config-data" (OuterVolumeSpecName: "config-data") pod "46d9b0d4-89b4-46b5-b803-b840e07ed7fe" (UID: "46d9b0d4-89b4-46b5-b803-b840e07ed7fe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.128227 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-scripts" (OuterVolumeSpecName: "scripts") pod "46d9b0d4-89b4-46b5-b803-b840e07ed7fe" (UID: "46d9b0d4-89b4-46b5-b803-b840e07ed7fe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.128511 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-logs" (OuterVolumeSpecName: "logs") pod "46d9b0d4-89b4-46b5-b803-b840e07ed7fe" (UID: "46d9b0d4-89b4-46b5-b803-b840e07ed7fe"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.133979 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-kube-api-access-29p8b" (OuterVolumeSpecName: "kube-api-access-29p8b") pod "46d9b0d4-89b4-46b5-b803-b840e07ed7fe" (UID: "46d9b0d4-89b4-46b5-b803-b840e07ed7fe"). 
InnerVolumeSpecName "kube-api-access-29p8b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.152945 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "46d9b0d4-89b4-46b5-b803-b840e07ed7fe" (UID: "46d9b0d4-89b4-46b5-b803-b840e07ed7fe"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.228069 4938 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.228098 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.228108 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.228117 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29p8b\" (UniqueName: \"kubernetes.io/projected/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-kube-api-access-29p8b\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.228127 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/46d9b0d4-89b4-46b5-b803-b840e07ed7fe-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.496327 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6df769749-fdrcj" event={"ID":"46d9b0d4-89b4-46b5-b803-b840e07ed7fe","Type":"ContainerDied","Data":"0c814b3fbcd7c78194c29dc407a38ff2add1868d0b2951ec11f51249db2f85a9"} Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.496374 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6df769749-fdrcj" Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.550681 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6df769749-fdrcj"] Nov 22 10:58:13 crc kubenswrapper[4938]: I1122 10:58:13.557284 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6df769749-fdrcj"] Nov 22 10:58:14 crc kubenswrapper[4938]: E1122 10:58:14.296440 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 22 10:58:14 crc kubenswrapper[4938]: E1122 10:58:14.296989 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-69qsr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-ddndr_openstack(745ffa75-881b-4c0d-8f61-70d872617409): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:58:14 crc kubenswrapper[4938]: E1122 10:58:14.298243 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc 
error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-ddndr" podUID="745ffa75-881b-4c0d-8f61-70d872617409" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.367937 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.371216 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.452045 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-ovsdbserver-nb\") pod \"878d8a17-a88c-43ae-930e-01a652f87d2b\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.452157 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f42b1674-4892-4907-a0e8-5aab697ff7aa-scripts\") pod \"f42b1674-4892-4907-a0e8-5aab697ff7aa\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.452207 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f42b1674-4892-4907-a0e8-5aab697ff7aa-logs\") pod \"f42b1674-4892-4907-a0e8-5aab697ff7aa\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.452241 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f42b1674-4892-4907-a0e8-5aab697ff7aa-horizon-secret-key\") pod \"f42b1674-4892-4907-a0e8-5aab697ff7aa\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.452278 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7x7t5\" (UniqueName: \"kubernetes.io/projected/878d8a17-a88c-43ae-930e-01a652f87d2b-kube-api-access-7x7t5\") pod \"878d8a17-a88c-43ae-930e-01a652f87d2b\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.452345 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-dns-svc\") pod \"878d8a17-a88c-43ae-930e-01a652f87d2b\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.452375 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f42b1674-4892-4907-a0e8-5aab697ff7aa-config-data\") pod \"f42b1674-4892-4907-a0e8-5aab697ff7aa\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.452406 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k65gk\" (UniqueName: \"kubernetes.io/projected/f42b1674-4892-4907-a0e8-5aab697ff7aa-kube-api-access-k65gk\") pod \"f42b1674-4892-4907-a0e8-5aab697ff7aa\" (UID: \"f42b1674-4892-4907-a0e8-5aab697ff7aa\") " Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.452448 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-config\") pod \"878d8a17-a88c-43ae-930e-01a652f87d2b\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.452501 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-ovsdbserver-sb\") pod \"878d8a17-a88c-43ae-930e-01a652f87d2b\" (UID: \"878d8a17-a88c-43ae-930e-01a652f87d2b\") " Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.453958 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f42b1674-4892-4907-a0e8-5aab697ff7aa-config-data" (OuterVolumeSpecName: "config-data") pod "f42b1674-4892-4907-a0e8-5aab697ff7aa" (UID: "f42b1674-4892-4907-a0e8-5aab697ff7aa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.457494 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/878d8a17-a88c-43ae-930e-01a652f87d2b-kube-api-access-7x7t5" (OuterVolumeSpecName: "kube-api-access-7x7t5") pod "878d8a17-a88c-43ae-930e-01a652f87d2b" (UID: "878d8a17-a88c-43ae-930e-01a652f87d2b"). InnerVolumeSpecName "kube-api-access-7x7t5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.461123 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f42b1674-4892-4907-a0e8-5aab697ff7aa-kube-api-access-k65gk" (OuterVolumeSpecName: "kube-api-access-k65gk") pod "f42b1674-4892-4907-a0e8-5aab697ff7aa" (UID: "f42b1674-4892-4907-a0e8-5aab697ff7aa"). InnerVolumeSpecName "kube-api-access-k65gk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.462272 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f42b1674-4892-4907-a0e8-5aab697ff7aa-logs" (OuterVolumeSpecName: "logs") pod "f42b1674-4892-4907-a0e8-5aab697ff7aa" (UID: "f42b1674-4892-4907-a0e8-5aab697ff7aa"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.462597 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f42b1674-4892-4907-a0e8-5aab697ff7aa-scripts" (OuterVolumeSpecName: "scripts") pod "f42b1674-4892-4907-a0e8-5aab697ff7aa" (UID: "f42b1674-4892-4907-a0e8-5aab697ff7aa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.465531 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46d9b0d4-89b4-46b5-b803-b840e07ed7fe" path="/var/lib/kubelet/pods/46d9b0d4-89b4-46b5-b803-b840e07ed7fe/volumes" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.474777 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f42b1674-4892-4907-a0e8-5aab697ff7aa-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "f42b1674-4892-4907-a0e8-5aab697ff7aa" (UID: "f42b1674-4892-4907-a0e8-5aab697ff7aa"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.504555 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "878d8a17-a88c-43ae-930e-01a652f87d2b" (UID: "878d8a17-a88c-43ae-930e-01a652f87d2b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.505720 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68d465898c-vtz2k" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.510545 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "878d8a17-a88c-43ae-930e-01a652f87d2b" (UID: "878d8a17-a88c-43ae-930e-01a652f87d2b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.511251 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" Nov 22 10:58:14 crc kubenswrapper[4938]: E1122 10:58:14.512989 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-ddndr" podUID="745ffa75-881b-4c0d-8f61-70d872617409" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.524273 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-config" (OuterVolumeSpecName: "config") pod "878d8a17-a88c-43ae-930e-01a652f87d2b" (UID: "878d8a17-a88c-43ae-930e-01a652f87d2b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.526958 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "878d8a17-a88c-43ae-930e-01a652f87d2b" (UID: "878d8a17-a88c-43ae-930e-01a652f87d2b"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.554123 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.554156 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f42b1674-4892-4907-a0e8-5aab697ff7aa-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.554166 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f42b1674-4892-4907-a0e8-5aab697ff7aa-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.554176 4938 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f42b1674-4892-4907-a0e8-5aab697ff7aa-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.554188 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7x7t5\" (UniqueName: \"kubernetes.io/projected/878d8a17-a88c-43ae-930e-01a652f87d2b-kube-api-access-7x7t5\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.554199 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.554209 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f42b1674-4892-4907-a0e8-5aab697ff7aa-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.554219 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k65gk\" (UniqueName: \"kubernetes.io/projected/f42b1674-4892-4907-a0e8-5aab697ff7aa-kube-api-access-k65gk\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.554228 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.554238 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/878d8a17-a88c-43ae-930e-01a652f87d2b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.603418 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68d465898c-vtz2k" event={"ID":"f42b1674-4892-4907-a0e8-5aab697ff7aa","Type":"ContainerDied","Data":"d38ae130a34752bea20aa169908560ca0279e935b5161f835d0ea662f67bb400"} Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.603454 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" event={"ID":"878d8a17-a88c-43ae-930e-01a652f87d2b","Type":"ContainerDied","Data":"3a4db7547dfdf5540bc4a08d07cdb766bc786a50c4bda3cd37eb52ce393ba630"} Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.650593 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68d465898c-vtz2k"] Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.658249 4938 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-68d465898c-vtz2k"] Nov 22 10:58:14 crc kubenswrapper[4938]: E1122 10:58:14.750143 4938 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf42b1674_4892_4907_a0e8_5aab697ff7aa.slice\": RecentStats: unable to find data in memory cache]" Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.846619 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rsr4t"] Nov 22 10:58:14 crc kubenswrapper[4938]: I1122 10:58:14.857416 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rsr4t"] Nov 22 10:58:15 crc kubenswrapper[4938]: E1122 10:58:15.150932 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 22 10:58:15 crc kubenswrapper[4938]: E1122 10:58:15.151106 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gvmgz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-p9nkx_openstack(c795cb12-352a-40bf-b828-4c4d16472eea): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:58:15 crc kubenswrapper[4938]: E1122 10:58:15.152297 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-p9nkx" podUID="c795cb12-352a-40bf-b828-4c4d16472eea" Nov 22 10:58:15 crc kubenswrapper[4938]: E1122 10:58:15.522089 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-p9nkx" podUID="c795cb12-352a-40bf-b828-4c4d16472eea" Nov 22 10:58:15 crc kubenswrapper[4938]: E1122 10:58:15.619717 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Nov 22 10:58:15 crc kubenswrapper[4938]: E1122 10:58:15.619864 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n646h544h5c9hb7hbdh5f9h79h67chcch697h688h645h6fh5cbh54fh67hc7h54dh67dh8bh78h94h596h5c8h59ch587h59h58fh548h8fh656hb4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gzf9c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(d4e750b6-f8d3-4603-8c8f-bf13f11e079e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:58:15 crc kubenswrapper[4938]: I1122 10:58:15.627741 4938 scope.go:117] "RemoveContainer" containerID="6e908aa00e34cab9366382c70382b3abb605ef9615bfb22effc4180f4f38a939" Nov 22 10:58:15 crc kubenswrapper[4938]: I1122 10:58:15.736635 4938 scope.go:117] "RemoveContainer" containerID="bed7ae043c6fb20d75d5daceb68f38992ec2afc28447d8d23f2312188ad29a9a" Nov 22 10:58:15 crc kubenswrapper[4938]: 
I1122 10:58:15.776722 4938 scope.go:117] "RemoveContainer" containerID="a6c5504d4e82ebc60278076f0055b48c5966ef6e35797f69681db5d528f8674d" Nov 22 10:58:15 crc kubenswrapper[4938]: I1122 10:58:15.824017 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rsr4t" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: i/o timeout" Nov 22 10:58:15 crc kubenswrapper[4938]: I1122 10:58:15.867179 4938 scope.go:117] "RemoveContainer" containerID="ddaf5f77568ede8f0097270dc83639f3c5c260b3818c6185c44296a3fea73865" Nov 22 10:58:16 crc kubenswrapper[4938]: I1122 10:58:16.161151 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-psj8d"] Nov 22 10:58:16 crc kubenswrapper[4938]: W1122 10:58:16.164234 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4217bc8_0afd_4d83_9e43_41d4adf355df.slice/crio-b30e1726d8f0affe5d43815ec376d6f7869b1f93105f17a325f8642887ad51fb WatchSource:0}: Error finding container b30e1726d8f0affe5d43815ec376d6f7869b1f93105f17a325f8642887ad51fb: Status 404 returned error can't find the container with id b30e1726d8f0affe5d43815ec376d6f7869b1f93105f17a325f8642887ad51fb Nov 22 10:58:16 crc kubenswrapper[4938]: I1122 10:58:16.340147 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:58:16 crc kubenswrapper[4938]: W1122 10:58:16.370605 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod94dceae5_9197_4c0e_98e8_6d85b9576f9f.slice/crio-ac36cfcf60682428c404078d1ae509c462326361c848b534f27852196575595c WatchSource:0}: Error finding container ac36cfcf60682428c404078d1ae509c462326361c848b534f27852196575595c: Status 404 returned error can't find the container with id ac36cfcf60682428c404078d1ae509c462326361c848b534f27852196575595c Nov 22 10:58:16 crc kubenswrapper[4938]: I1122 10:58:16.412436 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7bb4f8b4bd-qj489"] Nov 22 10:58:16 crc kubenswrapper[4938]: W1122 10:58:16.425374 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52d01853_e609_4339_a336_78e1b9f4f704.slice/crio-032b236e99b0c6a75fa5e9051a33baec865bb982775b92c49d2ea1bf6e86dbe8 WatchSource:0}: Error finding container 032b236e99b0c6a75fa5e9051a33baec865bb982775b92c49d2ea1bf6e86dbe8: Status 404 returned error can't find the container with id 032b236e99b0c6a75fa5e9051a33baec865bb982775b92c49d2ea1bf6e86dbe8 Nov 22 10:58:16 crc kubenswrapper[4938]: I1122 10:58:16.496000 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" path="/var/lib/kubelet/pods/878d8a17-a88c-43ae-930e-01a652f87d2b/volumes" Nov 22 10:58:16 crc kubenswrapper[4938]: I1122 10:58:16.496935 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f42b1674-4892-4907-a0e8-5aab697ff7aa" path="/var/lib/kubelet/pods/f42b1674-4892-4907-a0e8-5aab697ff7aa/volumes" Nov 22 10:58:16 crc kubenswrapper[4938]: I1122 10:58:16.530850 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-psj8d" event={"ID":"e4217bc8-0afd-4d83-9e43-41d4adf355df","Type":"ContainerStarted","Data":"9dcadf332c4dbc3fe26ae6f5741400c64b9a756792a955219ca1d3c0a9336484"} Nov 22 
10:58:16 crc kubenswrapper[4938]: I1122 10:58:16.530903 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-psj8d" event={"ID":"e4217bc8-0afd-4d83-9e43-41d4adf355df","Type":"ContainerStarted","Data":"b30e1726d8f0affe5d43815ec376d6f7869b1f93105f17a325f8642887ad51fb"} Nov 22 10:58:16 crc kubenswrapper[4938]: I1122 10:58:16.535309 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"94dceae5-9197-4c0e-98e8-6d85b9576f9f","Type":"ContainerStarted","Data":"ac36cfcf60682428c404078d1ae509c462326361c848b534f27852196575595c"} Nov 22 10:58:16 crc kubenswrapper[4938]: I1122 10:58:16.554056 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vs774" event={"ID":"973da40a-c63d-4e06-8750-c3d31d8b0abb","Type":"ContainerStarted","Data":"0a71de05781e1589ff17fce11d007b4f1ded630bc326b0b75541472e80c3f670"} Nov 22 10:58:16 crc kubenswrapper[4938]: I1122 10:58:16.554225 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-psj8d" podStartSLOduration=40.554211539 podStartE2EDuration="40.554211539s" podCreationTimestamp="2025-11-22 10:57:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:16.551763327 +0000 UTC m=+1229.019600726" watchObservedRunningTime="2025-11-22 10:58:16.554211539 +0000 UTC m=+1229.022048938" Nov 22 10:58:16 crc kubenswrapper[4938]: I1122 10:58:16.559901 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb4f8b4bd-qj489" event={"ID":"52d01853-e609-4339-a336-78e1b9f4f704","Type":"ContainerStarted","Data":"032b236e99b0c6a75fa5e9051a33baec865bb982775b92c49d2ea1bf6e86dbe8"} Nov 22 10:58:16 crc kubenswrapper[4938]: I1122 10:58:16.569682 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-vs774" podStartSLOduration=4.468828199 podStartE2EDuration="49.569667596s" podCreationTimestamp="2025-11-22 10:57:27 +0000 UTC" firstStartedPulling="2025-11-22 10:57:29.171637927 +0000 UTC m=+1181.639475326" lastFinishedPulling="2025-11-22 10:58:14.272477324 +0000 UTC m=+1226.740314723" observedRunningTime="2025-11-22 10:58:16.568919527 +0000 UTC m=+1229.036756926" watchObservedRunningTime="2025-11-22 10:58:16.569667596 +0000 UTC m=+1229.037504995" Nov 22 10:58:17 crc kubenswrapper[4938]: I1122 10:58:17.327286 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:58:17 crc kubenswrapper[4938]: W1122 10:58:17.339309 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5a3a928_92b6_4895_bea7_cec4b72d5f37.slice/crio-83b446f14e74df6aa3ca40ccd38eacbc19eb8ed1cceca32c49072b7da2d714d1 WatchSource:0}: Error finding container 83b446f14e74df6aa3ca40ccd38eacbc19eb8ed1cceca32c49072b7da2d714d1: Status 404 returned error can't find the container with id 83b446f14e74df6aa3ca40ccd38eacbc19eb8ed1cceca32c49072b7da2d714d1 Nov 22 10:58:17 crc kubenswrapper[4938]: I1122 10:58:17.573069 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb4f8b4bd-qj489" event={"ID":"52d01853-e609-4339-a336-78e1b9f4f704","Type":"ContainerStarted","Data":"7d043d7d1351b3748964aef8364ca2a06d902677eb42550979fad0fb99c6b555"} Nov 22 10:58:17 crc kubenswrapper[4938]: I1122 10:58:17.584045 4938 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f5a3a928-92b6-4895-bea7-cec4b72d5f37","Type":"ContainerStarted","Data":"83b446f14e74df6aa3ca40ccd38eacbc19eb8ed1cceca32c49072b7da2d714d1"} Nov 22 10:58:17 crc kubenswrapper[4938]: I1122 10:58:17.589924 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"94dceae5-9197-4c0e-98e8-6d85b9576f9f","Type":"ContainerStarted","Data":"9170a120837d84ec22a19d0e348475cbbedb5d4562c6c61620572b6e8c133ddc"} Nov 22 10:58:17 crc kubenswrapper[4938]: I1122 10:58:17.592276 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d4e750b6-f8d3-4603-8c8f-bf13f11e079e","Type":"ContainerStarted","Data":"049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e"} Nov 22 10:58:18 crc kubenswrapper[4938]: I1122 10:58:18.609965 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb4f8b4bd-qj489" event={"ID":"52d01853-e609-4339-a336-78e1b9f4f704","Type":"ContainerStarted","Data":"d55f3cdcb3cf7568e2be4c381a7b8f30f587dbdee88e868b405567a40d499b86"} Nov 22 10:58:18 crc kubenswrapper[4938]: I1122 10:58:18.614635 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f5a3a928-92b6-4895-bea7-cec4b72d5f37","Type":"ContainerStarted","Data":"83323798d83355db9cf7fdb613a5c91032ea92fac4e0904bb7421d3c103fdbf9"} Nov 22 10:58:18 crc kubenswrapper[4938]: I1122 10:58:18.616701 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"94dceae5-9197-4c0e-98e8-6d85b9576f9f","Type":"ContainerStarted","Data":"2c36ca61191dd76aa1d63afa14e150acc9bb112795665657f57f22daeac430dd"} Nov 22 10:58:18 crc kubenswrapper[4938]: I1122 10:58:18.616836 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="94dceae5-9197-4c0e-98e8-6d85b9576f9f" containerName="glance-log" containerID="cri-o://9170a120837d84ec22a19d0e348475cbbedb5d4562c6c61620572b6e8c133ddc" gracePeriod=30 Nov 22 10:58:18 crc kubenswrapper[4938]: I1122 10:58:18.617040 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="94dceae5-9197-4c0e-98e8-6d85b9576f9f" containerName="glance-httpd" containerID="cri-o://2c36ca61191dd76aa1d63afa14e150acc9bb112795665657f57f22daeac430dd" gracePeriod=30 Nov 22 10:58:18 crc kubenswrapper[4938]: I1122 10:58:18.642372 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7bb4f8b4bd-qj489" podStartSLOduration=40.001737876 podStartE2EDuration="40.642352533s" podCreationTimestamp="2025-11-22 10:57:38 +0000 UTC" firstStartedPulling="2025-11-22 10:58:16.427654038 +0000 UTC m=+1228.895491437" lastFinishedPulling="2025-11-22 10:58:17.068268695 +0000 UTC m=+1229.536106094" observedRunningTime="2025-11-22 10:58:18.642056926 +0000 UTC m=+1231.109894325" watchObservedRunningTime="2025-11-22 10:58:18.642352533 +0000 UTC m=+1231.110189932" Nov 22 10:58:18 crc kubenswrapper[4938]: I1122 10:58:18.662029 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=40.662010896 podStartE2EDuration="40.662010896s" podCreationTimestamp="2025-11-22 10:57:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 
10:58:18.658320633 +0000 UTC m=+1231.126158032" watchObservedRunningTime="2025-11-22 10:58:18.662010896 +0000 UTC m=+1231.129848295" Nov 22 10:58:19 crc kubenswrapper[4938]: I1122 10:58:19.352074 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:58:19 crc kubenswrapper[4938]: I1122 10:58:19.352137 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:58:19 crc kubenswrapper[4938]: I1122 10:58:19.625945 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f5a3a928-92b6-4895-bea7-cec4b72d5f37","Type":"ContainerStarted","Data":"2adc6dac5bbdd7d7b6da9d9b251f7dc18c253fd40f976654287b583725361f59"} Nov 22 10:58:19 crc kubenswrapper[4938]: I1122 10:58:19.626062 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f5a3a928-92b6-4895-bea7-cec4b72d5f37" containerName="glance-log" containerID="cri-o://83323798d83355db9cf7fdb613a5c91032ea92fac4e0904bb7421d3c103fdbf9" gracePeriod=30 Nov 22 10:58:19 crc kubenswrapper[4938]: I1122 10:58:19.626121 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f5a3a928-92b6-4895-bea7-cec4b72d5f37" containerName="glance-httpd" containerID="cri-o://2adc6dac5bbdd7d7b6da9d9b251f7dc18c253fd40f976654287b583725361f59" gracePeriod=30 Nov 22 10:58:19 crc kubenswrapper[4938]: I1122 10:58:19.635528 4938 generic.go:334] "Generic (PLEG): container finished" podID="94dceae5-9197-4c0e-98e8-6d85b9576f9f" containerID="2c36ca61191dd76aa1d63afa14e150acc9bb112795665657f57f22daeac430dd" exitCode=0 Nov 22 10:58:19 crc kubenswrapper[4938]: I1122 10:58:19.635564 4938 generic.go:334] "Generic (PLEG): container finished" podID="94dceae5-9197-4c0e-98e8-6d85b9576f9f" containerID="9170a120837d84ec22a19d0e348475cbbedb5d4562c6c61620572b6e8c133ddc" exitCode=143 Nov 22 10:58:19 crc kubenswrapper[4938]: I1122 10:58:19.635665 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"94dceae5-9197-4c0e-98e8-6d85b9576f9f","Type":"ContainerDied","Data":"2c36ca61191dd76aa1d63afa14e150acc9bb112795665657f57f22daeac430dd"} Nov 22 10:58:19 crc kubenswrapper[4938]: I1122 10:58:19.635731 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"94dceae5-9197-4c0e-98e8-6d85b9576f9f","Type":"ContainerDied","Data":"9170a120837d84ec22a19d0e348475cbbedb5d4562c6c61620572b6e8c133ddc"} Nov 22 10:58:19 crc kubenswrapper[4938]: I1122 10:58:19.647990 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=41.647969233 podStartE2EDuration="41.647969233s" podCreationTimestamp="2025-11-22 10:57:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:19.646037425 +0000 UTC m=+1232.113874824" watchObservedRunningTime="2025-11-22 10:58:19.647969233 +0000 UTC m=+1232.115806632" Nov 22 10:58:20 crc kubenswrapper[4938]: I1122 10:58:20.647925 4938 generic.go:334] "Generic (PLEG): container finished" podID="f5a3a928-92b6-4895-bea7-cec4b72d5f37" containerID="2adc6dac5bbdd7d7b6da9d9b251f7dc18c253fd40f976654287b583725361f59" exitCode=0 Nov 22 10:58:20 crc kubenswrapper[4938]: I1122 10:58:20.648237 
4938 generic.go:334] "Generic (PLEG): container finished" podID="f5a3a928-92b6-4895-bea7-cec4b72d5f37" containerID="83323798d83355db9cf7fdb613a5c91032ea92fac4e0904bb7421d3c103fdbf9" exitCode=143 Nov 22 10:58:20 crc kubenswrapper[4938]: I1122 10:58:20.647967 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f5a3a928-92b6-4895-bea7-cec4b72d5f37","Type":"ContainerDied","Data":"2adc6dac5bbdd7d7b6da9d9b251f7dc18c253fd40f976654287b583725361f59"} Nov 22 10:58:20 crc kubenswrapper[4938]: I1122 10:58:20.648357 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f5a3a928-92b6-4895-bea7-cec4b72d5f37","Type":"ContainerDied","Data":"83323798d83355db9cf7fdb613a5c91032ea92fac4e0904bb7421d3c103fdbf9"} Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.104005 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.221980 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-config-data\") pod \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.222602 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/94dceae5-9197-4c0e-98e8-6d85b9576f9f-httpd-run\") pod \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.222734 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.223047 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94dceae5-9197-4c0e-98e8-6d85b9576f9f-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "94dceae5-9197-4c0e-98e8-6d85b9576f9f" (UID: "94dceae5-9197-4c0e-98e8-6d85b9576f9f"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.223313 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-scripts\") pod \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.223378 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-combined-ca-bundle\") pod \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.223417 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckddr\" (UniqueName: \"kubernetes.io/projected/94dceae5-9197-4c0e-98e8-6d85b9576f9f-kube-api-access-ckddr\") pod \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.223441 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/94dceae5-9197-4c0e-98e8-6d85b9576f9f-logs\") pod \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\" (UID: \"94dceae5-9197-4c0e-98e8-6d85b9576f9f\") " Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.223948 4938 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/94dceae5-9197-4c0e-98e8-6d85b9576f9f-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.224030 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94dceae5-9197-4c0e-98e8-6d85b9576f9f-logs" (OuterVolumeSpecName: "logs") pod "94dceae5-9197-4c0e-98e8-6d85b9576f9f" (UID: "94dceae5-9197-4c0e-98e8-6d85b9576f9f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.228283 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "94dceae5-9197-4c0e-98e8-6d85b9576f9f" (UID: "94dceae5-9197-4c0e-98e8-6d85b9576f9f"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.228624 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-scripts" (OuterVolumeSpecName: "scripts") pod "94dceae5-9197-4c0e-98e8-6d85b9576f9f" (UID: "94dceae5-9197-4c0e-98e8-6d85b9576f9f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.242106 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94dceae5-9197-4c0e-98e8-6d85b9576f9f-kube-api-access-ckddr" (OuterVolumeSpecName: "kube-api-access-ckddr") pod "94dceae5-9197-4c0e-98e8-6d85b9576f9f" (UID: "94dceae5-9197-4c0e-98e8-6d85b9576f9f"). InnerVolumeSpecName "kube-api-access-ckddr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.264156 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "94dceae5-9197-4c0e-98e8-6d85b9576f9f" (UID: "94dceae5-9197-4c0e-98e8-6d85b9576f9f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.281127 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-config-data" (OuterVolumeSpecName: "config-data") pod "94dceae5-9197-4c0e-98e8-6d85b9576f9f" (UID: "94dceae5-9197-4c0e-98e8-6d85b9576f9f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.325933 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.325981 4938 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.325991 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.326016 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94dceae5-9197-4c0e-98e8-6d85b9576f9f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.326029 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckddr\" (UniqueName: \"kubernetes.io/projected/94dceae5-9197-4c0e-98e8-6d85b9576f9f-kube-api-access-ckddr\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.326038 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/94dceae5-9197-4c0e-98e8-6d85b9576f9f-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.346280 4938 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.427829 4938 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.666996 4938 generic.go:334] "Generic (PLEG): container finished" podID="e4217bc8-0afd-4d83-9e43-41d4adf355df" containerID="9dcadf332c4dbc3fe26ae6f5741400c64b9a756792a955219ca1d3c0a9336484" exitCode=0 Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.667053 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-psj8d" event={"ID":"e4217bc8-0afd-4d83-9e43-41d4adf355df","Type":"ContainerDied","Data":"9dcadf332c4dbc3fe26ae6f5741400c64b9a756792a955219ca1d3c0a9336484"} Nov 22 10:58:22 crc 
kubenswrapper[4938]: I1122 10:58:22.671295 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"94dceae5-9197-4c0e-98e8-6d85b9576f9f","Type":"ContainerDied","Data":"ac36cfcf60682428c404078d1ae509c462326361c848b534f27852196575595c"} Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.671330 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.671340 4938 scope.go:117] "RemoveContainer" containerID="2c36ca61191dd76aa1d63afa14e150acc9bb112795665657f57f22daeac430dd" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.705579 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.712239 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.727371 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:58:22 crc kubenswrapper[4938]: E1122 10:58:22.727693 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94dceae5-9197-4c0e-98e8-6d85b9576f9f" containerName="glance-log" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.727709 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="94dceae5-9197-4c0e-98e8-6d85b9576f9f" containerName="glance-log" Nov 22 10:58:22 crc kubenswrapper[4938]: E1122 10:58:22.727721 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="init" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.727728 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="init" Nov 22 10:58:22 crc kubenswrapper[4938]: E1122 10:58:22.727738 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="dnsmasq-dns" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.727744 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="dnsmasq-dns" Nov 22 10:58:22 crc kubenswrapper[4938]: E1122 10:58:22.727757 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94dceae5-9197-4c0e-98e8-6d85b9576f9f" containerName="glance-httpd" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.727763 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="94dceae5-9197-4c0e-98e8-6d85b9576f9f" containerName="glance-httpd" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.727976 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="94dceae5-9197-4c0e-98e8-6d85b9576f9f" containerName="glance-log" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.727990 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="878d8a17-a88c-43ae-930e-01a652f87d2b" containerName="dnsmasq-dns" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.728005 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="94dceae5-9197-4c0e-98e8-6d85b9576f9f" containerName="glance-httpd" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.728852 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.731826 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.733551 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.753745 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.838196 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.838264 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d29dc9e2-9a34-4127-95ec-100c5483b53c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.838312 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d29dc9e2-9a34-4127-95ec-100c5483b53c-logs\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.838334 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.838370 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.838418 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.838444 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.838471 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-5s9sk\" (UniqueName: \"kubernetes.io/projected/d29dc9e2-9a34-4127-95ec-100c5483b53c-kube-api-access-5s9sk\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.940135 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.940439 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.940460 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5s9sk\" (UniqueName: \"kubernetes.io/projected/d29dc9e2-9a34-4127-95ec-100c5483b53c-kube-api-access-5s9sk\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.940489 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.940516 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d29dc9e2-9a34-4127-95ec-100c5483b53c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.940557 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d29dc9e2-9a34-4127-95ec-100c5483b53c-logs\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.940578 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.940615 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.940873 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.945323 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d29dc9e2-9a34-4127-95ec-100c5483b53c-logs\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.946159 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d29dc9e2-9a34-4127-95ec-100c5483b53c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.947818 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.950409 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.950635 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.954639 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.969101 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s9sk\" (UniqueName: \"kubernetes.io/projected/d29dc9e2-9a34-4127-95ec-100c5483b53c-kube-api-access-5s9sk\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:22 crc kubenswrapper[4938]: I1122 10:58:22.971646 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:23 crc kubenswrapper[4938]: I1122 10:58:23.056575 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:23 crc kubenswrapper[4938]: I1122 10:58:23.680467 4938 generic.go:334] "Generic (PLEG): container finished" podID="973da40a-c63d-4e06-8750-c3d31d8b0abb" containerID="0a71de05781e1589ff17fce11d007b4f1ded630bc326b0b75541472e80c3f670" exitCode=0 Nov 22 10:58:23 crc kubenswrapper[4938]: I1122 10:58:23.680644 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vs774" event={"ID":"973da40a-c63d-4e06-8750-c3d31d8b0abb","Type":"ContainerDied","Data":"0a71de05781e1589ff17fce11d007b4f1ded630bc326b0b75541472e80c3f670"} Nov 22 10:58:24 crc kubenswrapper[4938]: I1122 10:58:24.458009 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94dceae5-9197-4c0e-98e8-6d85b9576f9f" path="/var/lib/kubelet/pods/94dceae5-9197-4c0e-98e8-6d85b9576f9f/volumes" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.526308 4938 scope.go:117] "RemoveContainer" containerID="9170a120837d84ec22a19d0e348475cbbedb5d4562c6c61620572b6e8c133ddc" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.701932 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.708781 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-psj8d" event={"ID":"e4217bc8-0afd-4d83-9e43-41d4adf355df","Type":"ContainerDied","Data":"b30e1726d8f0affe5d43815ec376d6f7869b1f93105f17a325f8642887ad51fb"} Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.708978 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b30e1726d8f0affe5d43815ec376d6f7869b1f93105f17a325f8642887ad51fb" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.709119 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-psj8d" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.715720 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vs774" event={"ID":"973da40a-c63d-4e06-8750-c3d31d8b0abb","Type":"ContainerDied","Data":"be112e98bff5a6bd83dc0f676fbf80e72d637c0ea9b016b10849674e13e43347"} Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.715764 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be112e98bff5a6bd83dc0f676fbf80e72d637c0ea9b016b10849674e13e43347" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.716346 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.723487 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f5a3a928-92b6-4895-bea7-cec4b72d5f37","Type":"ContainerDied","Data":"83b446f14e74df6aa3ca40ccd38eacbc19eb8ed1cceca32c49072b7da2d714d1"} Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.723531 4938 scope.go:117] "RemoveContainer" containerID="2adc6dac5bbdd7d7b6da9d9b251f7dc18c253fd40f976654287b583725361f59" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.755331 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-vs774" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.762050 4938 scope.go:117] "RemoveContainer" containerID="83323798d83355db9cf7fdb613a5c91032ea92fac4e0904bb7421d3c103fdbf9" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.812847 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvkdk\" (UniqueName: \"kubernetes.io/projected/973da40a-c63d-4e06-8750-c3d31d8b0abb-kube-api-access-zvkdk\") pod \"973da40a-c63d-4e06-8750-c3d31d8b0abb\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.812897 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-combined-ca-bundle\") pod \"973da40a-c63d-4e06-8750-c3d31d8b0abb\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.812931 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-combined-ca-bundle\") pod \"e4217bc8-0afd-4d83-9e43-41d4adf355df\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.812996 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813026 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-config-data\") pod \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813060 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-config-data\") pod \"e4217bc8-0afd-4d83-9e43-41d4adf355df\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813100 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5a3a928-92b6-4895-bea7-cec4b72d5f37-logs\") pod \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813121 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-credential-keys\") pod \"e4217bc8-0afd-4d83-9e43-41d4adf355df\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813143 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-scripts\") pod \"e4217bc8-0afd-4d83-9e43-41d4adf355df\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813164 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-scripts\") pod \"973da40a-c63d-4e06-8750-c3d31d8b0abb\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813197 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-fernet-keys\") pod \"e4217bc8-0afd-4d83-9e43-41d4adf355df\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813213 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmgc2\" (UniqueName: \"kubernetes.io/projected/f5a3a928-92b6-4895-bea7-cec4b72d5f37-kube-api-access-dmgc2\") pod \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813241 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fm8dr\" (UniqueName: \"kubernetes.io/projected/e4217bc8-0afd-4d83-9e43-41d4adf355df-kube-api-access-fm8dr\") pod \"e4217bc8-0afd-4d83-9e43-41d4adf355df\" (UID: \"e4217bc8-0afd-4d83-9e43-41d4adf355df\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813273 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5a3a928-92b6-4895-bea7-cec4b72d5f37-httpd-run\") pod \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813295 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-combined-ca-bundle\") pod \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813313 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/973da40a-c63d-4e06-8750-c3d31d8b0abb-logs\") pod \"973da40a-c63d-4e06-8750-c3d31d8b0abb\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813340 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-scripts\") pod \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\" (UID: \"f5a3a928-92b6-4895-bea7-cec4b72d5f37\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.813366 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-config-data\") pod \"973da40a-c63d-4e06-8750-c3d31d8b0abb\" (UID: \"973da40a-c63d-4e06-8750-c3d31d8b0abb\") " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.814266 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/973da40a-c63d-4e06-8750-c3d31d8b0abb-logs" (OuterVolumeSpecName: "logs") pod "973da40a-c63d-4e06-8750-c3d31d8b0abb" (UID: "973da40a-c63d-4e06-8750-c3d31d8b0abb"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.814269 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5a3a928-92b6-4895-bea7-cec4b72d5f37-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f5a3a928-92b6-4895-bea7-cec4b72d5f37" (UID: "f5a3a928-92b6-4895-bea7-cec4b72d5f37"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.815664 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5a3a928-92b6-4895-bea7-cec4b72d5f37-logs" (OuterVolumeSpecName: "logs") pod "f5a3a928-92b6-4895-bea7-cec4b72d5f37" (UID: "f5a3a928-92b6-4895-bea7-cec4b72d5f37"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.820860 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "f5a3a928-92b6-4895-bea7-cec4b72d5f37" (UID: "f5a3a928-92b6-4895-bea7-cec4b72d5f37"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.820944 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/973da40a-c63d-4e06-8750-c3d31d8b0abb-kube-api-access-zvkdk" (OuterVolumeSpecName: "kube-api-access-zvkdk") pod "973da40a-c63d-4e06-8750-c3d31d8b0abb" (UID: "973da40a-c63d-4e06-8750-c3d31d8b0abb"). InnerVolumeSpecName "kube-api-access-zvkdk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.821830 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5a3a928-92b6-4895-bea7-cec4b72d5f37-kube-api-access-dmgc2" (OuterVolumeSpecName: "kube-api-access-dmgc2") pod "f5a3a928-92b6-4895-bea7-cec4b72d5f37" (UID: "f5a3a928-92b6-4895-bea7-cec4b72d5f37"). InnerVolumeSpecName "kube-api-access-dmgc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.822479 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-scripts" (OuterVolumeSpecName: "scripts") pod "f5a3a928-92b6-4895-bea7-cec4b72d5f37" (UID: "f5a3a928-92b6-4895-bea7-cec4b72d5f37"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.823199 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-scripts" (OuterVolumeSpecName: "scripts") pod "e4217bc8-0afd-4d83-9e43-41d4adf355df" (UID: "e4217bc8-0afd-4d83-9e43-41d4adf355df"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.824689 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "e4217bc8-0afd-4d83-9e43-41d4adf355df" (UID: "e4217bc8-0afd-4d83-9e43-41d4adf355df"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.825380 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4217bc8-0afd-4d83-9e43-41d4adf355df-kube-api-access-fm8dr" (OuterVolumeSpecName: "kube-api-access-fm8dr") pod "e4217bc8-0afd-4d83-9e43-41d4adf355df" (UID: "e4217bc8-0afd-4d83-9e43-41d4adf355df"). InnerVolumeSpecName "kube-api-access-fm8dr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.826051 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "e4217bc8-0afd-4d83-9e43-41d4adf355df" (UID: "e4217bc8-0afd-4d83-9e43-41d4adf355df"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.854409 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-scripts" (OuterVolumeSpecName: "scripts") pod "973da40a-c63d-4e06-8750-c3d31d8b0abb" (UID: "973da40a-c63d-4e06-8750-c3d31d8b0abb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.857496 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5a3a928-92b6-4895-bea7-cec4b72d5f37" (UID: "f5a3a928-92b6-4895-bea7-cec4b72d5f37"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.870149 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4217bc8-0afd-4d83-9e43-41d4adf355df" (UID: "e4217bc8-0afd-4d83-9e43-41d4adf355df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.871461 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-config-data" (OuterVolumeSpecName: "config-data") pod "973da40a-c63d-4e06-8750-c3d31d8b0abb" (UID: "973da40a-c63d-4e06-8750-c3d31d8b0abb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.888760 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-config-data" (OuterVolumeSpecName: "config-data") pod "f5a3a928-92b6-4895-bea7-cec4b72d5f37" (UID: "f5a3a928-92b6-4895-bea7-cec4b72d5f37"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.891871 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-config-data" (OuterVolumeSpecName: "config-data") pod "e4217bc8-0afd-4d83-9e43-41d4adf355df" (UID: "e4217bc8-0afd-4d83-9e43-41d4adf355df"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.895568 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "973da40a-c63d-4e06-8750-c3d31d8b0abb" (UID: "973da40a-c63d-4e06-8750-c3d31d8b0abb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914742 4938 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914779 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914791 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914800 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5a3a928-92b6-4895-bea7-cec4b72d5f37-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914807 4938 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914818 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914826 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914835 4938 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914843 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmgc2\" (UniqueName: \"kubernetes.io/projected/f5a3a928-92b6-4895-bea7-cec4b72d5f37-kube-api-access-dmgc2\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914852 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fm8dr\" (UniqueName: \"kubernetes.io/projected/e4217bc8-0afd-4d83-9e43-41d4adf355df-kube-api-access-fm8dr\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914860 4938 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5a3a928-92b6-4895-bea7-cec4b72d5f37-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914868 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914877 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/973da40a-c63d-4e06-8750-c3d31d8b0abb-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914885 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a3a928-92b6-4895-bea7-cec4b72d5f37-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914892 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914902 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvkdk\" (UniqueName: \"kubernetes.io/projected/973da40a-c63d-4e06-8750-c3d31d8b0abb-kube-api-access-zvkdk\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914927 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/973da40a-c63d-4e06-8750-c3d31d8b0abb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.914938 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4217bc8-0afd-4d83-9e43-41d4adf355df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4938]: I1122 10:58:26.949456 4938 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.010542 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.016456 4938 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:27 crc kubenswrapper[4938]: W1122 10:58:27.078716 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd29dc9e2_9a34_4127_95ec_100c5483b53c.slice/crio-3e8b2f913c7e0da1ba6d3343eb52518d64dc4758fca14b4398e9b3289fdc2c16 WatchSource:0}: Error finding container 3e8b2f913c7e0da1ba6d3343eb52518d64dc4758fca14b4398e9b3289fdc2c16: Status 404 returned error can't find the container with id 3e8b2f913c7e0da1ba6d3343eb52518d64dc4758fca14b4398e9b3289fdc2c16 Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.771295 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84db767bd9-qw4kz" event={"ID":"55be79ce-3816-4644-9e33-3762615249e3","Type":"ContainerStarted","Data":"bb99bce6487ce815a7c8d32fd09b41dee4ef133b654c9b9497375a1cd73f1a6b"} Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.771850 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84db767bd9-qw4kz" event={"ID":"55be79ce-3816-4644-9e33-3762615249e3","Type":"ContainerStarted","Data":"2d2f992d56e048a9a6b64125a55a634ae075ae2da25c37cfa892307d7c8fb3e1"} Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 
10:58:27.777243 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d29dc9e2-9a34-4127-95ec-100c5483b53c","Type":"ContainerStarted","Data":"ddf5625e8f5b75203a221aaefdde98986607e3019b1c39cfec3f0949198e39a6"} Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.777304 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d29dc9e2-9a34-4127-95ec-100c5483b53c","Type":"ContainerStarted","Data":"3e8b2f913c7e0da1ba6d3343eb52518d64dc4758fca14b4398e9b3289fdc2c16"} Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.778547 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vs774" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.778567 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.806078 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-84db767bd9-qw4kz" podStartSLOduration=-9223371979.04872 podStartE2EDuration="57.806057193s" podCreationTimestamp="2025-11-22 10:57:30 +0000 UTC" firstStartedPulling="2025-11-22 10:57:31.57507027 +0000 UTC m=+1184.042907669" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:27.799306474 +0000 UTC m=+1240.267143873" watchObservedRunningTime="2025-11-22 10:58:27.806057193 +0000 UTC m=+1240.273894592" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.888781 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5d64bdcd96-8b525"] Nov 22 10:58:27 crc kubenswrapper[4938]: E1122 10:58:27.889321 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4217bc8-0afd-4d83-9e43-41d4adf355df" containerName="keystone-bootstrap" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.889345 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4217bc8-0afd-4d83-9e43-41d4adf355df" containerName="keystone-bootstrap" Nov 22 10:58:27 crc kubenswrapper[4938]: E1122 10:58:27.889354 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a3a928-92b6-4895-bea7-cec4b72d5f37" containerName="glance-httpd" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.889360 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a3a928-92b6-4895-bea7-cec4b72d5f37" containerName="glance-httpd" Nov 22 10:58:27 crc kubenswrapper[4938]: E1122 10:58:27.889389 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a3a928-92b6-4895-bea7-cec4b72d5f37" containerName="glance-log" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.889396 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a3a928-92b6-4895-bea7-cec4b72d5f37" containerName="glance-log" Nov 22 10:58:27 crc kubenswrapper[4938]: E1122 10:58:27.889403 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="973da40a-c63d-4e06-8750-c3d31d8b0abb" containerName="placement-db-sync" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.889409 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="973da40a-c63d-4e06-8750-c3d31d8b0abb" containerName="placement-db-sync" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.889630 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a3a928-92b6-4895-bea7-cec4b72d5f37" containerName="glance-log" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 
10:58:27.889642 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4217bc8-0afd-4d83-9e43-41d4adf355df" containerName="keystone-bootstrap" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.889656 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a3a928-92b6-4895-bea7-cec4b72d5f37" containerName="glance-httpd" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.889670 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="973da40a-c63d-4e06-8750-c3d31d8b0abb" containerName="placement-db-sync" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.890663 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.892500 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.895165 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.895478 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-2bhmb" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.895733 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.896356 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.900008 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6bf8df76c8-5c2xm"] Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.901441 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.906667 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.910156 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.910336 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-ct79k" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.910484 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.910639 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.910812 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.914843 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.917976 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.924061 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5d64bdcd96-8b525"] Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.930702 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6bf8df76c8-5c2xm"] Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.981016 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.982862 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.991198 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 22 10:58:27 crc kubenswrapper[4938]: I1122 10:58:27.991494 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.013526 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051068 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp5t9\" (UniqueName: \"kubernetes.io/projected/26b58116-00b3-49d5-bf76-d262754d9cfb-kube-api-access-fp5t9\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051189 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-internal-tls-certs\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051384 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-fernet-keys\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051493 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq2jc\" (UniqueName: \"kubernetes.io/projected/70cbbc4a-ed36-471c-9b65-5eea9fc87891-kube-api-access-tq2jc\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051520 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70cbbc4a-ed36-471c-9b65-5eea9fc87891-logs\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051538 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-combined-ca-bundle\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051608 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-scripts\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051646 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-public-tls-certs\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051670 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-credential-keys\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051695 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-internal-tls-certs\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051739 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-public-tls-certs\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051823 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-config-data\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051852 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-scripts\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051890 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-combined-ca-bundle\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.051957 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-config-data\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.153535 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp5t9\" (UniqueName: \"kubernetes.io/projected/26b58116-00b3-49d5-bf76-d262754d9cfb-kube-api-access-fp5t9\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.153627 4938 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-internal-tls-certs\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.153683 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-fernet-keys\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.153715 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dkhd\" (UniqueName: \"kubernetes.io/projected/db1f8068-a414-40bf-984e-2a8a44d2ce07-kube-api-access-6dkhd\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.153760 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq2jc\" (UniqueName: \"kubernetes.io/projected/70cbbc4a-ed36-471c-9b65-5eea9fc87891-kube-api-access-tq2jc\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.153804 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70cbbc4a-ed36-471c-9b65-5eea9fc87891-logs\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.153876 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-combined-ca-bundle\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155496 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-scripts\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155581 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-public-tls-certs\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155619 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-credential-keys\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155650 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-internal-tls-certs\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155676 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-public-tls-certs\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155720 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155775 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/db1f8068-a414-40bf-984e-2a8a44d2ce07-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155798 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-config-data\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155854 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-config-data\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155883 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-scripts\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155930 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155963 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-combined-ca-bundle\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.155998 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-config-data\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.156067 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-scripts\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.156159 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.156187 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db1f8068-a414-40bf-984e-2a8a44d2ce07-logs\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.159504 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70cbbc4a-ed36-471c-9b65-5eea9fc87891-logs\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.160521 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-internal-tls-certs\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.161512 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-public-tls-certs\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.166188 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-scripts\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.166666 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-public-tls-certs\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.172756 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-credential-keys\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " 
pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.173780 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-fernet-keys\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.176616 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-scripts\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.176714 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-config-data\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.176757 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp5t9\" (UniqueName: \"kubernetes.io/projected/26b58116-00b3-49d5-bf76-d262754d9cfb-kube-api-access-fp5t9\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.177830 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq2jc\" (UniqueName: \"kubernetes.io/projected/70cbbc4a-ed36-471c-9b65-5eea9fc87891-kube-api-access-tq2jc\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.179067 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-config-data\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.180435 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-internal-tls-certs\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.183363 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70cbbc4a-ed36-471c-9b65-5eea9fc87891-combined-ca-bundle\") pod \"placement-5d64bdcd96-8b525\" (UID: \"70cbbc4a-ed36-471c-9b65-5eea9fc87891\") " pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.189658 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26b58116-00b3-49d5-bf76-d262754d9cfb-combined-ca-bundle\") pod \"keystone-6bf8df76c8-5c2xm\" (UID: \"26b58116-00b3-49d5-bf76-d262754d9cfb\") " pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.221370 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.245340 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.257635 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dkhd\" (UniqueName: \"kubernetes.io/projected/db1f8068-a414-40bf-984e-2a8a44d2ce07-kube-api-access-6dkhd\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.257719 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.257745 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/db1f8068-a414-40bf-984e-2a8a44d2ce07-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.257759 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-config-data\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.257785 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.257828 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-scripts\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.257862 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.257880 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db1f8068-a414-40bf-984e-2a8a44d2ce07-logs\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.258394 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db1f8068-a414-40bf-984e-2a8a44d2ce07-logs\") pod 
\"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.259323 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/db1f8068-a414-40bf-984e-2a8a44d2ce07-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.259745 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.266106 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.266514 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-scripts\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.277028 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-config-data\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.280902 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dkhd\" (UniqueName: \"kubernetes.io/projected/db1f8068-a414-40bf-984e-2a8a44d2ce07-kube-api-access-6dkhd\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.287658 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.314070 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.327170 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.478206 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5a3a928-92b6-4895-bea7-cec4b72d5f37" path="/var/lib/kubelet/pods/f5a3a928-92b6-4895-bea7-cec4b72d5f37/volumes" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.799957 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d29dc9e2-9a34-4127-95ec-100c5483b53c","Type":"ContainerStarted","Data":"fb76004ce1761d61653c70811b5cfea9222d2c30584753513dcca537b1acfe18"} Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.806092 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6bf8df76c8-5c2xm"] Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.811629 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-p9nkx" event={"ID":"c795cb12-352a-40bf-b828-4c4d16472eea","Type":"ContainerStarted","Data":"3ecdb9a9624006bc482603fda8692c2a9a85fcc542d3c6c18df6662201098e61"} Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.856195 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d4e750b6-f8d3-4603-8c8f-bf13f11e079e","Type":"ContainerStarted","Data":"0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51"} Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.877083 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.8770624399999996 podStartE2EDuration="6.87706244s" podCreationTimestamp="2025-11-22 10:58:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:28.856601358 +0000 UTC m=+1241.324438757" watchObservedRunningTime="2025-11-22 10:58:28.87706244 +0000 UTC m=+1241.344899839" Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.903167 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5d64bdcd96-8b525"] Nov 22 10:58:28 crc kubenswrapper[4938]: I1122 10:58:28.904410 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-p9nkx" podStartSLOduration=4.869877804 podStartE2EDuration="1m1.904395105s" podCreationTimestamp="2025-11-22 10:57:27 +0000 UTC" firstStartedPulling="2025-11-22 10:57:30.614364415 +0000 UTC m=+1183.082201814" lastFinishedPulling="2025-11-22 10:58:27.648881716 +0000 UTC m=+1240.116719115" observedRunningTime="2025-11-22 10:58:28.876823804 +0000 UTC m=+1241.344661193" watchObservedRunningTime="2025-11-22 10:58:28.904395105 +0000 UTC m=+1241.372232504" Nov 22 10:58:29 crc kubenswrapper[4938]: I1122 10:58:29.102738 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:58:29 crc kubenswrapper[4938]: W1122 10:58:29.120983 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb1f8068_a414_40bf_984e_2a8a44d2ce07.slice/crio-1d427597fa84eeedaf8135626ff3ebde2109935aa0724af484b34afdd092ee3d WatchSource:0}: Error finding container 1d427597fa84eeedaf8135626ff3ebde2109935aa0724af484b34afdd092ee3d: Status 404 returned error can't find the container with id 1d427597fa84eeedaf8135626ff3ebde2109935aa0724af484b34afdd092ee3d Nov 22 10:58:29 crc kubenswrapper[4938]: I1122 10:58:29.353805 4938 prober.go:107] 
"Probe failed" probeType="Startup" pod="openstack/horizon-7bb4f8b4bd-qj489" podUID="52d01853-e609-4339-a336-78e1b9f4f704" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused" Nov 22 10:58:29 crc kubenswrapper[4938]: I1122 10:58:29.869212 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5d64bdcd96-8b525" event={"ID":"70cbbc4a-ed36-471c-9b65-5eea9fc87891","Type":"ContainerStarted","Data":"09eaf2ceb6ecb8cf750e0d6dc6d3e040ded5fb8c2e82abd28eff34916376374f"} Nov 22 10:58:29 crc kubenswrapper[4938]: I1122 10:58:29.869528 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5d64bdcd96-8b525" event={"ID":"70cbbc4a-ed36-471c-9b65-5eea9fc87891","Type":"ContainerStarted","Data":"77ddee9693a77fc83f1d7edb492ad4a67b5c9e56b40170b31fd512a6cef5eaa0"} Nov 22 10:58:29 crc kubenswrapper[4938]: I1122 10:58:29.874567 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6bf8df76c8-5c2xm" event={"ID":"26b58116-00b3-49d5-bf76-d262754d9cfb","Type":"ContainerStarted","Data":"1fdc57008e4f6b76186a064563493c38d8d2aab89b8960c6d10c1fdebffdbf32"} Nov 22 10:58:29 crc kubenswrapper[4938]: I1122 10:58:29.874595 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6bf8df76c8-5c2xm" event={"ID":"26b58116-00b3-49d5-bf76-d262754d9cfb","Type":"ContainerStarted","Data":"ed48b3be2540c4a6f3895607104621aae3d2938b3964f2dba6d5cb783b8a101a"} Nov 22 10:58:29 crc kubenswrapper[4938]: I1122 10:58:29.875795 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:58:29 crc kubenswrapper[4938]: I1122 10:58:29.882671 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"db1f8068-a414-40bf-984e-2a8a44d2ce07","Type":"ContainerStarted","Data":"1d427597fa84eeedaf8135626ff3ebde2109935aa0724af484b34afdd092ee3d"} Nov 22 10:58:30 crc kubenswrapper[4938]: I1122 10:58:30.754049 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:58:30 crc kubenswrapper[4938]: I1122 10:58:30.754464 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:58:31 crc kubenswrapper[4938]: I1122 10:58:31.902110 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5d64bdcd96-8b525" event={"ID":"70cbbc4a-ed36-471c-9b65-5eea9fc87891","Type":"ContainerStarted","Data":"350e2310db52a84391380580c0ebddb14f48ea2ba99e38db6f33e7dc9c2eef60"} Nov 22 10:58:31 crc kubenswrapper[4938]: I1122 10:58:31.905477 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"db1f8068-a414-40bf-984e-2a8a44d2ce07","Type":"ContainerStarted","Data":"10667d768b684e8257be7d5e1e8519ba223853cbc1661fb52f4c310bd1e3f4d8"} Nov 22 10:58:32 crc kubenswrapper[4938]: I1122 10:58:32.913354 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:32 crc kubenswrapper[4938]: I1122 10:58:32.913410 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:58:32 crc kubenswrapper[4938]: I1122 10:58:32.932556 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/keystone-6bf8df76c8-5c2xm" podStartSLOduration=5.932541444 podStartE2EDuration="5.932541444s" podCreationTimestamp="2025-11-22 10:58:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:29.898263119 +0000 UTC m=+1242.366100508" watchObservedRunningTime="2025-11-22 10:58:32.932541444 +0000 UTC m=+1245.400378843" Nov 22 10:58:32 crc kubenswrapper[4938]: I1122 10:58:32.936554 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5d64bdcd96-8b525" podStartSLOduration=5.936541204 podStartE2EDuration="5.936541204s" podCreationTimestamp="2025-11-22 10:58:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:32.93119237 +0000 UTC m=+1245.399029759" watchObservedRunningTime="2025-11-22 10:58:32.936541204 +0000 UTC m=+1245.404378603" Nov 22 10:58:33 crc kubenswrapper[4938]: I1122 10:58:33.057272 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4938]: I1122 10:58:33.057341 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4938]: I1122 10:58:33.084841 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4938]: I1122 10:58:33.111412 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4938]: I1122 10:58:33.923011 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"db1f8068-a414-40bf-984e-2a8a44d2ce07","Type":"ContainerStarted","Data":"8591f8da93d80894ea2b01c1c54cfefd76185e5f99720f6642c9d78890600653"} Nov 22 10:58:33 crc kubenswrapper[4938]: I1122 10:58:33.924869 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-ddndr" event={"ID":"745ffa75-881b-4c0d-8f61-70d872617409","Type":"ContainerStarted","Data":"3a412995d5c50d3471d0cd86652e95fc96f87a1488096ee3caf8f67ee195d9a5"} Nov 22 10:58:33 crc kubenswrapper[4938]: I1122 10:58:33.925219 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4938]: I1122 10:58:33.925278 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4938]: I1122 10:58:33.972317 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.972300279 podStartE2EDuration="6.972300279s" podCreationTimestamp="2025-11-22 10:58:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:33.952022061 +0000 UTC m=+1246.419859500" watchObservedRunningTime="2025-11-22 10:58:33.972300279 +0000 UTC m=+1246.440137678" Nov 22 10:58:33 crc kubenswrapper[4938]: I1122 10:58:33.972642 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-ddndr" podStartSLOduration=4.69243578 podStartE2EDuration="1m6.972636967s" 
podCreationTimestamp="2025-11-22 10:57:27 +0000 UTC" firstStartedPulling="2025-11-22 10:57:28.99409618 +0000 UTC m=+1181.461933579" lastFinishedPulling="2025-11-22 10:58:31.274297367 +0000 UTC m=+1243.742134766" observedRunningTime="2025-11-22 10:58:33.968615356 +0000 UTC m=+1246.436452755" watchObservedRunningTime="2025-11-22 10:58:33.972636967 +0000 UTC m=+1246.440474366" Nov 22 10:58:36 crc kubenswrapper[4938]: I1122 10:58:36.513533 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:36 crc kubenswrapper[4938]: I1122 10:58:36.514196 4938 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 10:58:36 crc kubenswrapper[4938]: I1122 10:58:36.540415 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:38 crc kubenswrapper[4938]: I1122 10:58:38.327357 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 22 10:58:38 crc kubenswrapper[4938]: I1122 10:58:38.327782 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 22 10:58:38 crc kubenswrapper[4938]: I1122 10:58:38.373411 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 22 10:58:38 crc kubenswrapper[4938]: I1122 10:58:38.388399 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 22 10:58:38 crc kubenswrapper[4938]: I1122 10:58:38.977200 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 22 10:58:38 crc kubenswrapper[4938]: I1122 10:58:38.977257 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 22 10:58:40 crc kubenswrapper[4938]: I1122 10:58:40.755680 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-84db767bd9-qw4kz" podUID="55be79ce-3816-4644-9e33-3762615249e3" containerName="horizon" probeResult="failure" output="Get \"http://10.217.0.145:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8080: connect: connection refused" Nov 22 10:58:40 crc kubenswrapper[4938]: I1122 10:58:40.960029 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 10:58:41 crc kubenswrapper[4938]: I1122 10:58:41.049040 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 10:58:42 crc kubenswrapper[4938]: I1122 10:58:42.190099 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:58:43 crc kubenswrapper[4938]: I1122 10:58:43.917694 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7bb4f8b4bd-qj489" Nov 22 10:58:43 crc kubenswrapper[4938]: I1122 10:58:43.974067 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-84db767bd9-qw4kz"] Nov 22 10:58:43 crc kubenswrapper[4938]: I1122 10:58:43.974348 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-84db767bd9-qw4kz" podUID="55be79ce-3816-4644-9e33-3762615249e3" containerName="horizon-log" 
containerID="cri-o://2d2f992d56e048a9a6b64125a55a634ae075ae2da25c37cfa892307d7c8fb3e1" gracePeriod=30 Nov 22 10:58:43 crc kubenswrapper[4938]: I1122 10:58:43.974391 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-84db767bd9-qw4kz" podUID="55be79ce-3816-4644-9e33-3762615249e3" containerName="horizon" containerID="cri-o://bb99bce6487ce815a7c8d32fd09b41dee4ef133b654c9b9497375a1cd73f1a6b" gracePeriod=30 Nov 22 10:58:47 crc kubenswrapper[4938]: E1122 10:58:47.964605 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.169081 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d4e750b6-f8d3-4603-8c8f-bf13f11e079e","Type":"ContainerStarted","Data":"c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895"} Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.169274 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerName="ceilometer-notification-agent" containerID="cri-o://049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e" gracePeriod=30 Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.169544 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.169852 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerName="proxy-httpd" containerID="cri-o://c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895" gracePeriod=30 Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.169960 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerName="sg-core" containerID="cri-o://0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51" gracePeriod=30 Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.697873 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.753336 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-run-httpd\") pod \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.753406 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-log-httpd\") pod \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.753437 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzf9c\" (UniqueName: \"kubernetes.io/projected/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-kube-api-access-gzf9c\") pod \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.753496 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-sg-core-conf-yaml\") pod \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.753524 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-config-data\") pod \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.753577 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-scripts\") pod \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.753616 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-combined-ca-bundle\") pod \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\" (UID: \"d4e750b6-f8d3-4603-8c8f-bf13f11e079e\") " Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.758421 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d4e750b6-f8d3-4603-8c8f-bf13f11e079e" (UID: "d4e750b6-f8d3-4603-8c8f-bf13f11e079e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.761370 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d4e750b6-f8d3-4603-8c8f-bf13f11e079e" (UID: "d4e750b6-f8d3-4603-8c8f-bf13f11e079e"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.761604 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-kube-api-access-gzf9c" (OuterVolumeSpecName: "kube-api-access-gzf9c") pod "d4e750b6-f8d3-4603-8c8f-bf13f11e079e" (UID: "d4e750b6-f8d3-4603-8c8f-bf13f11e079e"). InnerVolumeSpecName "kube-api-access-gzf9c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.763250 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-scripts" (OuterVolumeSpecName: "scripts") pod "d4e750b6-f8d3-4603-8c8f-bf13f11e079e" (UID: "d4e750b6-f8d3-4603-8c8f-bf13f11e079e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.783439 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d4e750b6-f8d3-4603-8c8f-bf13f11e079e" (UID: "d4e750b6-f8d3-4603-8c8f-bf13f11e079e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.810115 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d4e750b6-f8d3-4603-8c8f-bf13f11e079e" (UID: "d4e750b6-f8d3-4603-8c8f-bf13f11e079e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.836341 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-config-data" (OuterVolumeSpecName: "config-data") pod "d4e750b6-f8d3-4603-8c8f-bf13f11e079e" (UID: "d4e750b6-f8d3-4603-8c8f-bf13f11e079e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.859697 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.859750 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.859768 4938 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.859779 4938 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.859792 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzf9c\" (UniqueName: \"kubernetes.io/projected/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-kube-api-access-gzf9c\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.859803 4938 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:48 crc kubenswrapper[4938]: I1122 10:58:48.859812 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4e750b6-f8d3-4603-8c8f-bf13f11e079e-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.179734 4938 generic.go:334] "Generic (PLEG): container finished" podID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerID="c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895" exitCode=0 Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.179785 4938 generic.go:334] "Generic (PLEG): container finished" podID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerID="0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51" exitCode=2 Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.179795 4938 generic.go:334] "Generic (PLEG): container finished" podID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerID="049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e" exitCode=0 Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.179792 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d4e750b6-f8d3-4603-8c8f-bf13f11e079e","Type":"ContainerDied","Data":"c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895"} Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.179854 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d4e750b6-f8d3-4603-8c8f-bf13f11e079e","Type":"ContainerDied","Data":"0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51"} Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.179854 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.179881 4938 scope.go:117] "RemoveContainer" containerID="c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.179869 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d4e750b6-f8d3-4603-8c8f-bf13f11e079e","Type":"ContainerDied","Data":"049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e"} Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.180039 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d4e750b6-f8d3-4603-8c8f-bf13f11e079e","Type":"ContainerDied","Data":"7af5990fa7862e6bc8edb36ee82d59949c5f752190f1ef9b79e02ffebc6dea3e"} Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.204659 4938 scope.go:117] "RemoveContainer" containerID="0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.243532 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.247090 4938 scope.go:117] "RemoveContainer" containerID="049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.248885 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.272901 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:49 crc kubenswrapper[4938]: E1122 10:58:49.273343 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerName="sg-core" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.273365 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerName="sg-core" Nov 22 10:58:49 crc kubenswrapper[4938]: E1122 10:58:49.273404 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerName="ceilometer-notification-agent" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.273411 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerName="ceilometer-notification-agent" Nov 22 10:58:49 crc kubenswrapper[4938]: E1122 10:58:49.273422 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerName="proxy-httpd" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.273429 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerName="proxy-httpd" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.273602 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerName="ceilometer-notification-agent" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.273615 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerName="proxy-httpd" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.273629 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" containerName="sg-core" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.275181 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.277464 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.277613 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.281292 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.299533 4938 scope.go:117] "RemoveContainer" containerID="c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895" Nov 22 10:58:49 crc kubenswrapper[4938]: E1122 10:58:49.304329 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895\": container with ID starting with c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895 not found: ID does not exist" containerID="c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.304384 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895"} err="failed to get container status \"c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895\": rpc error: code = NotFound desc = could not find container \"c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895\": container with ID starting with c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895 not found: ID does not exist" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.304412 4938 scope.go:117] "RemoveContainer" containerID="0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51" Nov 22 10:58:49 crc kubenswrapper[4938]: E1122 10:58:49.304941 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51\": container with ID starting with 0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51 not found: ID does not exist" containerID="0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.305017 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51"} err="failed to get container status \"0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51\": rpc error: code = NotFound desc = could not find container \"0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51\": container with ID starting with 0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51 not found: ID does not exist" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.305047 4938 scope.go:117] "RemoveContainer" containerID="049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e" Nov 22 10:58:49 crc kubenswrapper[4938]: E1122 10:58:49.306114 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e\": container with ID starting with 049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e not found: ID 
does not exist" containerID="049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.306151 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e"} err="failed to get container status \"049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e\": rpc error: code = NotFound desc = could not find container \"049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e\": container with ID starting with 049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e not found: ID does not exist" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.306177 4938 scope.go:117] "RemoveContainer" containerID="c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.307031 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895"} err="failed to get container status \"c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895\": rpc error: code = NotFound desc = could not find container \"c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895\": container with ID starting with c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895 not found: ID does not exist" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.307209 4938 scope.go:117] "RemoveContainer" containerID="0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.307882 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51"} err="failed to get container status \"0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51\": rpc error: code = NotFound desc = could not find container \"0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51\": container with ID starting with 0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51 not found: ID does not exist" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.307904 4938 scope.go:117] "RemoveContainer" containerID="049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.308362 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e"} err="failed to get container status \"049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e\": rpc error: code = NotFound desc = could not find container \"049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e\": container with ID starting with 049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e not found: ID does not exist" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.308437 4938 scope.go:117] "RemoveContainer" containerID="c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.308740 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895"} err="failed to get container status \"c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895\": rpc error: code = NotFound desc = 
could not find container \"c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895\": container with ID starting with c62f0c15d0045dd507322c675107e3eda495a5bbb6ca4d14e255623680f2b895 not found: ID does not exist" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.308772 4938 scope.go:117] "RemoveContainer" containerID="0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.309035 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51"} err="failed to get container status \"0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51\": rpc error: code = NotFound desc = could not find container \"0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51\": container with ID starting with 0a796dc6834d00a45e117ee864f610ea67b3f74f50290b8485201a6280e96c51 not found: ID does not exist" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.309065 4938 scope.go:117] "RemoveContainer" containerID="049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.309303 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e"} err="failed to get container status \"049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e\": rpc error: code = NotFound desc = could not find container \"049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e\": container with ID starting with 049f557d5f44f40df2979fdf1008a6129c6c4735fdd95749d01a262866b32e7e not found: ID does not exist" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.369411 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gksx4\" (UniqueName: \"kubernetes.io/projected/12962edb-6a81-4491-8ae2-85efe7102284-kube-api-access-gksx4\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.369465 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.369520 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12962edb-6a81-4491-8ae2-85efe7102284-log-httpd\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.369615 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.369742 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-scripts\") pod 
\"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.369822 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-config-data\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.369883 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12962edb-6a81-4491-8ae2-85efe7102284-run-httpd\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.471889 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.472360 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-scripts\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.472405 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-config-data\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.472438 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12962edb-6a81-4491-8ae2-85efe7102284-run-httpd\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.472474 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gksx4\" (UniqueName: \"kubernetes.io/projected/12962edb-6a81-4491-8ae2-85efe7102284-kube-api-access-gksx4\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.472507 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.472573 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12962edb-6a81-4491-8ae2-85efe7102284-log-httpd\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.473204 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12962edb-6a81-4491-8ae2-85efe7102284-run-httpd\") pod 
\"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.473461 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12962edb-6a81-4491-8ae2-85efe7102284-log-httpd\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.476048 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.476136 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-scripts\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.476238 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-config-data\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.479475 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.490636 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gksx4\" (UniqueName: \"kubernetes.io/projected/12962edb-6a81-4491-8ae2-85efe7102284-kube-api-access-gksx4\") pod \"ceilometer-0\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4938]: I1122 10:58:49.602677 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:50 crc kubenswrapper[4938]: I1122 10:58:50.041237 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:50 crc kubenswrapper[4938]: W1122 10:58:50.050223 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12962edb_6a81_4491_8ae2_85efe7102284.slice/crio-acb3b45e3be4e59fbb88244e35b2042c5652e40a5e2dd96fbcb5e29e06a71305 WatchSource:0}: Error finding container acb3b45e3be4e59fbb88244e35b2042c5652e40a5e2dd96fbcb5e29e06a71305: Status 404 returned error can't find the container with id acb3b45e3be4e59fbb88244e35b2042c5652e40a5e2dd96fbcb5e29e06a71305 Nov 22 10:58:50 crc kubenswrapper[4938]: I1122 10:58:50.189826 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12962edb-6a81-4491-8ae2-85efe7102284","Type":"ContainerStarted","Data":"acb3b45e3be4e59fbb88244e35b2042c5652e40a5e2dd96fbcb5e29e06a71305"} Nov 22 10:58:50 crc kubenswrapper[4938]: I1122 10:58:50.457862 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4e750b6-f8d3-4603-8c8f-bf13f11e079e" path="/var/lib/kubelet/pods/d4e750b6-f8d3-4603-8c8f-bf13f11e079e/volumes" Nov 22 10:58:51 crc kubenswrapper[4938]: I1122 10:58:51.200764 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12962edb-6a81-4491-8ae2-85efe7102284","Type":"ContainerStarted","Data":"c9ab59bc6f8122d1a745c1e14123d3595e9efcc4b08fb7eea8af7ad85061f75d"} Nov 22 10:58:52 crc kubenswrapper[4938]: I1122 10:58:52.211147 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12962edb-6a81-4491-8ae2-85efe7102284","Type":"ContainerStarted","Data":"806e9b3c1459f2976be2e86e8e13e39d6942530ce51145e93a9031cb96c9ab2b"} Nov 22 10:58:52 crc kubenswrapper[4938]: I1122 10:58:52.213294 4938 generic.go:334] "Generic (PLEG): container finished" podID="c795cb12-352a-40bf-b828-4c4d16472eea" containerID="3ecdb9a9624006bc482603fda8692c2a9a85fcc542d3c6c18df6662201098e61" exitCode=0 Nov 22 10:58:52 crc kubenswrapper[4938]: I1122 10:58:52.213346 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-p9nkx" event={"ID":"c795cb12-352a-40bf-b828-4c4d16472eea","Type":"ContainerDied","Data":"3ecdb9a9624006bc482603fda8692c2a9a85fcc542d3c6c18df6662201098e61"} Nov 22 10:58:52 crc kubenswrapper[4938]: I1122 10:58:52.215070 4938 generic.go:334] "Generic (PLEG): container finished" podID="55be79ce-3816-4644-9e33-3762615249e3" containerID="bb99bce6487ce815a7c8d32fd09b41dee4ef133b654c9b9497375a1cd73f1a6b" exitCode=1 Nov 22 10:58:52 crc kubenswrapper[4938]: I1122 10:58:52.215097 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84db767bd9-qw4kz" event={"ID":"55be79ce-3816-4644-9e33-3762615249e3","Type":"ContainerDied","Data":"bb99bce6487ce815a7c8d32fd09b41dee4ef133b654c9b9497375a1cd73f1a6b"} Nov 22 10:58:53 crc kubenswrapper[4938]: I1122 10:58:53.231652 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12962edb-6a81-4491-8ae2-85efe7102284","Type":"ContainerStarted","Data":"2c28d27fbd9e0a4b8deea8ae9420b5fd5d5b141cd99c146a17e89bbfcbcb8d97"} Nov 22 10:58:53 crc kubenswrapper[4938]: I1122 10:58:53.586033 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:58:53 crc kubenswrapper[4938]: I1122 10:58:53.674942 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c795cb12-352a-40bf-b828-4c4d16472eea-combined-ca-bundle\") pod \"c795cb12-352a-40bf-b828-4c4d16472eea\" (UID: \"c795cb12-352a-40bf-b828-4c4d16472eea\") " Nov 22 10:58:53 crc kubenswrapper[4938]: I1122 10:58:53.675005 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvmgz\" (UniqueName: \"kubernetes.io/projected/c795cb12-352a-40bf-b828-4c4d16472eea-kube-api-access-gvmgz\") pod \"c795cb12-352a-40bf-b828-4c4d16472eea\" (UID: \"c795cb12-352a-40bf-b828-4c4d16472eea\") " Nov 22 10:58:53 crc kubenswrapper[4938]: I1122 10:58:53.675047 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c795cb12-352a-40bf-b828-4c4d16472eea-db-sync-config-data\") pod \"c795cb12-352a-40bf-b828-4c4d16472eea\" (UID: \"c795cb12-352a-40bf-b828-4c4d16472eea\") " Nov 22 10:58:53 crc kubenswrapper[4938]: I1122 10:58:53.680927 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c795cb12-352a-40bf-b828-4c4d16472eea-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c795cb12-352a-40bf-b828-4c4d16472eea" (UID: "c795cb12-352a-40bf-b828-4c4d16472eea"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:53 crc kubenswrapper[4938]: I1122 10:58:53.682167 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c795cb12-352a-40bf-b828-4c4d16472eea-kube-api-access-gvmgz" (OuterVolumeSpecName: "kube-api-access-gvmgz") pod "c795cb12-352a-40bf-b828-4c4d16472eea" (UID: "c795cb12-352a-40bf-b828-4c4d16472eea"). InnerVolumeSpecName "kube-api-access-gvmgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:53 crc kubenswrapper[4938]: I1122 10:58:53.706843 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c795cb12-352a-40bf-b828-4c4d16472eea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c795cb12-352a-40bf-b828-4c4d16472eea" (UID: "c795cb12-352a-40bf-b828-4c4d16472eea"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:53 crc kubenswrapper[4938]: I1122 10:58:53.777436 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c795cb12-352a-40bf-b828-4c4d16472eea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:53 crc kubenswrapper[4938]: I1122 10:58:53.777991 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvmgz\" (UniqueName: \"kubernetes.io/projected/c795cb12-352a-40bf-b828-4c4d16472eea-kube-api-access-gvmgz\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:53 crc kubenswrapper[4938]: I1122 10:58:53.778004 4938 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c795cb12-352a-40bf-b828-4c4d16472eea-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.244601 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12962edb-6a81-4491-8ae2-85efe7102284","Type":"ContainerStarted","Data":"4e7803c1f3f27e0b753d33402a9fcf9c26ef0b2cabbeb21a566a734c28b67556"} Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.244732 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.248265 4938 generic.go:334] "Generic (PLEG): container finished" podID="745ffa75-881b-4c0d-8f61-70d872617409" containerID="3a412995d5c50d3471d0cd86652e95fc96f87a1488096ee3caf8f67ee195d9a5" exitCode=0 Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.248328 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-ddndr" event={"ID":"745ffa75-881b-4c0d-8f61-70d872617409","Type":"ContainerDied","Data":"3a412995d5c50d3471d0cd86652e95fc96f87a1488096ee3caf8f67ee195d9a5"} Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.250411 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-p9nkx" event={"ID":"c795cb12-352a-40bf-b828-4c4d16472eea","Type":"ContainerDied","Data":"7e90d233a457e16fa0f62af0461afc3af36605a6de0cb371a90025c5aafe6c3b"} Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.250439 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e90d233a457e16fa0f62af0461afc3af36605a6de0cb371a90025c5aafe6c3b" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.250491 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-p9nkx" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.273677 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.96731377 podStartE2EDuration="5.27365509s" podCreationTimestamp="2025-11-22 10:58:49 +0000 UTC" firstStartedPulling="2025-11-22 10:58:50.053997364 +0000 UTC m=+1262.521834763" lastFinishedPulling="2025-11-22 10:58:53.360338674 +0000 UTC m=+1265.828176083" observedRunningTime="2025-11-22 10:58:54.266663705 +0000 UTC m=+1266.734501104" watchObservedRunningTime="2025-11-22 10:58:54.27365509 +0000 UTC m=+1266.741492489" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.514191 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6865f54775-4zkqs"] Nov 22 10:58:54 crc kubenswrapper[4938]: E1122 10:58:54.514546 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c795cb12-352a-40bf-b828-4c4d16472eea" containerName="barbican-db-sync" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.514561 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c795cb12-352a-40bf-b828-4c4d16472eea" containerName="barbican-db-sync" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.514755 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="c795cb12-352a-40bf-b828-4c4d16472eea" containerName="barbican-db-sync" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.515592 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.522781 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.523107 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.531800 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6865f54775-4zkqs"] Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.536025 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-gsv52" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.546985 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6546d544c4-wcfbr"] Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.548519 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.550472 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.590041 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6546d544c4-wcfbr"] Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.591514 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/796676ca-fdb7-4ac2-9092-73d2ac3ac760-combined-ca-bundle\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.591586 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/796676ca-fdb7-4ac2-9092-73d2ac3ac760-config-data\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.591618 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8xsd\" (UniqueName: \"kubernetes.io/projected/796676ca-fdb7-4ac2-9092-73d2ac3ac760-kube-api-access-z8xsd\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.591690 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/796676ca-fdb7-4ac2-9092-73d2ac3ac760-logs\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.591744 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/796676ca-fdb7-4ac2-9092-73d2ac3ac760-config-data-custom\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.626471 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-f8v57"] Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.627840 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.649846 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-f8v57"] Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.694144 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d27fa20-5741-49e1-a69c-5f3c856bea32-config-data\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.694224 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/796676ca-fdb7-4ac2-9092-73d2ac3ac760-logs\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.694290 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/796676ca-fdb7-4ac2-9092-73d2ac3ac760-config-data-custom\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.694322 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d27fa20-5741-49e1-a69c-5f3c856bea32-combined-ca-bundle\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.694356 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/796676ca-fdb7-4ac2-9092-73d2ac3ac760-combined-ca-bundle\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.694411 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d27fa20-5741-49e1-a69c-5f3c856bea32-config-data-custom\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.694433 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/796676ca-fdb7-4ac2-9092-73d2ac3ac760-config-data\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.694462 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8xsd\" (UniqueName: \"kubernetes.io/projected/796676ca-fdb7-4ac2-9092-73d2ac3ac760-kube-api-access-z8xsd\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc 
kubenswrapper[4938]: I1122 10:58:54.694482 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phnhq\" (UniqueName: \"kubernetes.io/projected/0d27fa20-5741-49e1-a69c-5f3c856bea32-kube-api-access-phnhq\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.694501 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d27fa20-5741-49e1-a69c-5f3c856bea32-logs\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.700001 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/796676ca-fdb7-4ac2-9092-73d2ac3ac760-logs\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.706944 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/796676ca-fdb7-4ac2-9092-73d2ac3ac760-combined-ca-bundle\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.723310 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8xsd\" (UniqueName: \"kubernetes.io/projected/796676ca-fdb7-4ac2-9092-73d2ac3ac760-kube-api-access-z8xsd\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.732263 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/796676ca-fdb7-4ac2-9092-73d2ac3ac760-config-data-custom\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.739508 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/796676ca-fdb7-4ac2-9092-73d2ac3ac760-config-data\") pod \"barbican-worker-6865f54775-4zkqs\" (UID: \"796676ca-fdb7-4ac2-9092-73d2ac3ac760\") " pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.779822 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-84958d7694-crwst"] Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.784202 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.787371 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.794066 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-84958d7694-crwst"] Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.795779 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d27fa20-5741-49e1-a69c-5f3c856bea32-config-data\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.795829 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-dns-swift-storage-0\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.795862 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-config\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.795937 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d27fa20-5741-49e1-a69c-5f3c856bea32-combined-ca-bundle\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.795953 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5srvr\" (UniqueName: \"kubernetes.io/projected/be4b55af-8570-431c-bd58-4a1649c0f641-kube-api-access-5srvr\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.795981 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-ovsdbserver-sb\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.796002 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-ovsdbserver-nb\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.796020 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-dns-svc\") pod 
\"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.796043 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d27fa20-5741-49e1-a69c-5f3c856bea32-config-data-custom\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.796072 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phnhq\" (UniqueName: \"kubernetes.io/projected/0d27fa20-5741-49e1-a69c-5f3c856bea32-kube-api-access-phnhq\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.796087 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d27fa20-5741-49e1-a69c-5f3c856bea32-logs\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.796485 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d27fa20-5741-49e1-a69c-5f3c856bea32-logs\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.803481 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d27fa20-5741-49e1-a69c-5f3c856bea32-config-data-custom\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.804449 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d27fa20-5741-49e1-a69c-5f3c856bea32-combined-ca-bundle\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.819872 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d27fa20-5741-49e1-a69c-5f3c856bea32-config-data\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.822458 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phnhq\" (UniqueName: \"kubernetes.io/projected/0d27fa20-5741-49e1-a69c-5f3c856bea32-kube-api-access-phnhq\") pod \"barbican-keystone-listener-6546d544c4-wcfbr\" (UID: \"0d27fa20-5741-49e1-a69c-5f3c856bea32\") " pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.840133 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-6865f54775-4zkqs" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.893422 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.899247 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-dns-svc\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.899325 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-config-data-custom\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.899376 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-config-data\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.899418 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-dns-swift-storage-0\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.899451 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rt26k\" (UniqueName: \"kubernetes.io/projected/03728744-ec67-4880-9913-40bd5e011e60-kube-api-access-rt26k\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.899472 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-config\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.899513 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03728744-ec67-4880-9913-40bd5e011e60-logs\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.899551 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5srvr\" (UniqueName: \"kubernetes.io/projected/be4b55af-8570-431c-bd58-4a1649c0f641-kube-api-access-5srvr\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.899571 4938 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-combined-ca-bundle\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.899600 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-ovsdbserver-sb\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.899623 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-ovsdbserver-nb\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.900440 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-ovsdbserver-nb\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.901179 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-dns-svc\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.901798 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-dns-swift-storage-0\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.902011 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-config\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.902520 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-ovsdbserver-sb\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.924718 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5srvr\" (UniqueName: \"kubernetes.io/projected/be4b55af-8570-431c-bd58-4a1649c0f641-kube-api-access-5srvr\") pod \"dnsmasq-dns-7c67bffd47-f8v57\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:54 crc kubenswrapper[4938]: I1122 10:58:54.956310 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.003815 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03728744-ec67-4880-9913-40bd5e011e60-logs\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.003899 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-combined-ca-bundle\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.003987 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-config-data-custom\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.004031 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-config-data\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.004069 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rt26k\" (UniqueName: \"kubernetes.io/projected/03728744-ec67-4880-9913-40bd5e011e60-kube-api-access-rt26k\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.004704 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03728744-ec67-4880-9913-40bd5e011e60-logs\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.012612 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-config-data-custom\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.013193 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-combined-ca-bundle\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.018306 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-config-data\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:55 crc 
kubenswrapper[4938]: I1122 10:58:55.028506 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rt26k\" (UniqueName: \"kubernetes.io/projected/03728744-ec67-4880-9913-40bd5e011e60-kube-api-access-rt26k\") pod \"barbican-api-84958d7694-crwst\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.221724 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.367373 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6865f54775-4zkqs"] Nov 22 10:58:55 crc kubenswrapper[4938]: W1122 10:58:55.392065 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod796676ca_fdb7_4ac2_9092_73d2ac3ac760.slice/crio-6b1e1b1abdf037527f5172865a5af1444d718b06a4c7001730cd10478a756b0a WatchSource:0}: Error finding container 6b1e1b1abdf037527f5172865a5af1444d718b06a4c7001730cd10478a756b0a: Status 404 returned error can't find the container with id 6b1e1b1abdf037527f5172865a5af1444d718b06a4c7001730cd10478a756b0a Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.482936 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-f8v57"] Nov 22 10:58:55 crc kubenswrapper[4938]: W1122 10:58:55.487747 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe4b55af_8570_431c_bd58_4a1649c0f641.slice/crio-5c1e54a0e6fc8dd3e25a6838487a07f9199e349155b6b27c226f4c8129d6e887 WatchSource:0}: Error finding container 5c1e54a0e6fc8dd3e25a6838487a07f9199e349155b6b27c226f4c8129d6e887: Status 404 returned error can't find the container with id 5c1e54a0e6fc8dd3e25a6838487a07f9199e349155b6b27c226f4c8129d6e887 Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.495377 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6546d544c4-wcfbr"] Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.715394 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-ddndr" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.826742 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-config-data\") pod \"745ffa75-881b-4c0d-8f61-70d872617409\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.827792 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-combined-ca-bundle\") pod \"745ffa75-881b-4c0d-8f61-70d872617409\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.828081 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-scripts\") pod \"745ffa75-881b-4c0d-8f61-70d872617409\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.828123 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-db-sync-config-data\") pod \"745ffa75-881b-4c0d-8f61-70d872617409\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.828157 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69qsr\" (UniqueName: \"kubernetes.io/projected/745ffa75-881b-4c0d-8f61-70d872617409-kube-api-access-69qsr\") pod \"745ffa75-881b-4c0d-8f61-70d872617409\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.828269 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/745ffa75-881b-4c0d-8f61-70d872617409-etc-machine-id\") pod \"745ffa75-881b-4c0d-8f61-70d872617409\" (UID: \"745ffa75-881b-4c0d-8f61-70d872617409\") " Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.828774 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/745ffa75-881b-4c0d-8f61-70d872617409-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "745ffa75-881b-4c0d-8f61-70d872617409" (UID: "745ffa75-881b-4c0d-8f61-70d872617409"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.834177 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/745ffa75-881b-4c0d-8f61-70d872617409-kube-api-access-69qsr" (OuterVolumeSpecName: "kube-api-access-69qsr") pod "745ffa75-881b-4c0d-8f61-70d872617409" (UID: "745ffa75-881b-4c0d-8f61-70d872617409"). InnerVolumeSpecName "kube-api-access-69qsr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.835594 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "745ffa75-881b-4c0d-8f61-70d872617409" (UID: "745ffa75-881b-4c0d-8f61-70d872617409"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.835842 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-scripts" (OuterVolumeSpecName: "scripts") pod "745ffa75-881b-4c0d-8f61-70d872617409" (UID: "745ffa75-881b-4c0d-8f61-70d872617409"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.855161 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "745ffa75-881b-4c0d-8f61-70d872617409" (UID: "745ffa75-881b-4c0d-8f61-70d872617409"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.878570 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-config-data" (OuterVolumeSpecName: "config-data") pod "745ffa75-881b-4c0d-8f61-70d872617409" (UID: "745ffa75-881b-4c0d-8f61-70d872617409"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.889669 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-84958d7694-crwst"] Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.930090 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.930133 4938 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.930146 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69qsr\" (UniqueName: \"kubernetes.io/projected/745ffa75-881b-4c0d-8f61-70d872617409-kube-api-access-69qsr\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.930158 4938 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/745ffa75-881b-4c0d-8f61-70d872617409-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.930170 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:55 crc kubenswrapper[4938]: I1122 10:58:55.930183 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/745ffa75-881b-4c0d-8f61-70d872617409-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.271852 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84958d7694-crwst" event={"ID":"03728744-ec67-4880-9913-40bd5e011e60","Type":"ContainerStarted","Data":"9da44cf998ea3acc76355c2a0080d603778a5d70020ca99ff26fb7040e78cab6"} Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.272190 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-api-84958d7694-crwst" event={"ID":"03728744-ec67-4880-9913-40bd5e011e60","Type":"ContainerStarted","Data":"139a775eb94aa293ec0b59fec273c2d441728dc07f19fd71c9792ffd0fa090c1"} Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.273445 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" event={"ID":"0d27fa20-5741-49e1-a69c-5f3c856bea32","Type":"ContainerStarted","Data":"8b24569a58d3ff1159723c6e1c15869340468428c3c4f3ad5ba70651ef501593"} Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.276419 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-ddndr" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.276449 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-ddndr" event={"ID":"745ffa75-881b-4c0d-8f61-70d872617409","Type":"ContainerDied","Data":"4c6e9b7ec1e60c13244b991088d3fcebeb5a623a6763d3c2a2adab48dd968cf8"} Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.276565 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c6e9b7ec1e60c13244b991088d3fcebeb5a623a6763d3c2a2adab48dd968cf8" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.279252 4938 generic.go:334] "Generic (PLEG): container finished" podID="0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146" containerID="8bac59801cf21b8548fe8306bf06f75f3d14cd3e9f59ae1887aa1b45baada354" exitCode=0 Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.279382 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-qd8nf" event={"ID":"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146","Type":"ContainerDied","Data":"8bac59801cf21b8548fe8306bf06f75f3d14cd3e9f59ae1887aa1b45baada354"} Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.282412 4938 generic.go:334] "Generic (PLEG): container finished" podID="be4b55af-8570-431c-bd58-4a1649c0f641" containerID="9c3fd100bb65d322a041a369c5e56cf59728e6672ff37cc987ab646c33e9bebf" exitCode=0 Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.282536 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" event={"ID":"be4b55af-8570-431c-bd58-4a1649c0f641","Type":"ContainerDied","Data":"9c3fd100bb65d322a041a369c5e56cf59728e6672ff37cc987ab646c33e9bebf"} Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.282599 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" event={"ID":"be4b55af-8570-431c-bd58-4a1649c0f641","Type":"ContainerStarted","Data":"5c1e54a0e6fc8dd3e25a6838487a07f9199e349155b6b27c226f4c8129d6e887"} Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.298158 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6865f54775-4zkqs" event={"ID":"796676ca-fdb7-4ac2-9092-73d2ac3ac760","Type":"ContainerStarted","Data":"6b1e1b1abdf037527f5172865a5af1444d718b06a4c7001730cd10478a756b0a"} Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.487254 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:58:56 crc kubenswrapper[4938]: E1122 10:58:56.487841 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="745ffa75-881b-4c0d-8f61-70d872617409" containerName="cinder-db-sync" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.487854 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="745ffa75-881b-4c0d-8f61-70d872617409" containerName="cinder-db-sync" Nov 22 
10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.497617 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="745ffa75-881b-4c0d-8f61-70d872617409" containerName="cinder-db-sync" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.533131 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.542219 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.555535 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.555825 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.556055 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-s7p24" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.556073 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.597305 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-f8v57"] Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.643297 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-98tqv"] Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.651487 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.653035 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sq4nr\" (UniqueName: \"kubernetes.io/projected/278730a8-49d3-465d-a6b3-49f52848cc46-kube-api-access-sq4nr\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.653077 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-scripts\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.653105 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.653139 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-config-data\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.653203 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/278730a8-49d3-465d-a6b3-49f52848cc46-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: 
\"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.653221 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.672646 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-98tqv"] Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754355 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/278730a8-49d3-465d-a6b3-49f52848cc46-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754395 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754448 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-config\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754478 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sq4nr\" (UniqueName: \"kubernetes.io/projected/278730a8-49d3-465d-a6b3-49f52848cc46-kube-api-access-sq4nr\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754501 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-dns-svc\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754526 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-scripts\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754553 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754573 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5rql\" (UniqueName: 
\"kubernetes.io/projected/1bd35528-3a46-4ed8-8817-137c1f1e9d35-kube-api-access-b5rql\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754589 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-dns-swift-storage-0\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754633 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-config-data\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754675 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754712 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.754809 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/278730a8-49d3-465d-a6b3-49f52848cc46-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.765668 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.768073 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.773634 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.774574 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-scripts\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.795837 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-config-data\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.796712 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.803032 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.812513 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sq4nr\" (UniqueName: \"kubernetes.io/projected/278730a8-49d3-465d-a6b3-49f52848cc46-kube-api-access-sq4nr\") pod \"cinder-scheduler-0\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.815400 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.858606 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/134cb80c-67f8-45ed-b602-68bce2f35109-etc-machine-id\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.858676 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.858731 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-config-data\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.858761 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.858792 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-config-data-custom\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.858847 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-scripts\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.858892 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-config\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.858958 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/134cb80c-67f8-45ed-b602-68bce2f35109-logs\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.858983 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mn2nj\" (UniqueName: \"kubernetes.io/projected/134cb80c-67f8-45ed-b602-68bce2f35109-kube-api-access-mn2nj\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.859002 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-dns-svc\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.859055 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.859077 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5rql\" (UniqueName: \"kubernetes.io/projected/1bd35528-3a46-4ed8-8817-137c1f1e9d35-kube-api-access-b5rql\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.859097 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-dns-swift-storage-0\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.860329 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-dns-swift-storage-0\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.860605 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.861270 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-ovsdbserver-nb\") pod 
\"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.866323 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-dns-svc\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.883212 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-config\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.887359 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5rql\" (UniqueName: \"kubernetes.io/projected/1bd35528-3a46-4ed8-8817-137c1f1e9d35-kube-api-access-b5rql\") pod \"dnsmasq-dns-5cc8b5d5c5-98tqv\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.906720 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.960507 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/134cb80c-67f8-45ed-b602-68bce2f35109-etc-machine-id\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.960579 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-config-data\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.960618 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-config-data-custom\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.960675 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-scripts\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.960731 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/134cb80c-67f8-45ed-b602-68bce2f35109-logs\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.960763 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mn2nj\" (UniqueName: \"kubernetes.io/projected/134cb80c-67f8-45ed-b602-68bce2f35109-kube-api-access-mn2nj\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 
10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.960810 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.960846 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/134cb80c-67f8-45ed-b602-68bce2f35109-etc-machine-id\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.961744 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/134cb80c-67f8-45ed-b602-68bce2f35109-logs\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.965963 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.968390 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-scripts\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.968668 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-config-data-custom\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.972241 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-config-data\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:56 crc kubenswrapper[4938]: I1122 10:58:56.982718 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mn2nj\" (UniqueName: \"kubernetes.io/projected/134cb80c-67f8-45ed-b602-68bce2f35109-kube-api-access-mn2nj\") pod \"cinder-api-0\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " pod="openstack/cinder-api-0" Nov 22 10:58:57 crc kubenswrapper[4938]: I1122 10:58:57.007622 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:58:57 crc kubenswrapper[4938]: I1122 10:58:57.262175 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 22 10:58:57 crc kubenswrapper[4938]: I1122 10:58:57.309179 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84958d7694-crwst" event={"ID":"03728744-ec67-4880-9913-40bd5e011e60","Type":"ContainerStarted","Data":"ca1d6435f4c16f63e13af258476bbcd307e59a65ef20abff44aec9b14117ef8a"} Nov 22 10:58:57 crc kubenswrapper[4938]: I1122 10:58:57.309469 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:57 crc kubenswrapper[4938]: I1122 10:58:57.309489 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:58:57 crc kubenswrapper[4938]: I1122 10:58:57.330623 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-84958d7694-crwst" podStartSLOduration=3.330602853 podStartE2EDuration="3.330602853s" podCreationTimestamp="2025-11-22 10:58:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:57.323456444 +0000 UTC m=+1269.791293843" watchObservedRunningTime="2025-11-22 10:58:57.330602853 +0000 UTC m=+1269.798440253" Nov 22 10:58:57 crc kubenswrapper[4938]: I1122 10:58:57.871428 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:58:57 crc kubenswrapper[4938]: I1122 10:58:57.977885 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zb8tg\" (UniqueName: \"kubernetes.io/projected/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-kube-api-access-zb8tg\") pod \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\" (UID: \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\") " Nov 22 10:58:57 crc kubenswrapper[4938]: I1122 10:58:57.978074 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-config\") pod \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\" (UID: \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\") " Nov 22 10:58:57 crc kubenswrapper[4938]: I1122 10:58:57.978161 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-combined-ca-bundle\") pod \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\" (UID: \"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146\") " Nov 22 10:58:57 crc kubenswrapper[4938]: I1122 10:58:57.986606 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-kube-api-access-zb8tg" (OuterVolumeSpecName: "kube-api-access-zb8tg") pod "0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146" (UID: "0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146"). InnerVolumeSpecName "kube-api-access-zb8tg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.058195 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-config" (OuterVolumeSpecName: "config") pod "0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146" (UID: "0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.080439 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zb8tg\" (UniqueName: \"kubernetes.io/projected/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-kube-api-access-zb8tg\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.080651 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.090132 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146" (UID: "0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.184801 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.222792 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.361200 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"134cb80c-67f8-45ed-b602-68bce2f35109","Type":"ContainerStarted","Data":"52656e4ab4ef47de18a73b7c0a738de77bc52a1a071954c9dac6e943ab59d2a4"} Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.375067 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-qd8nf" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.376098 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-qd8nf" event={"ID":"0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146","Type":"ContainerDied","Data":"cc09d69a9d593c605f3c808232aa39dd747fa963bc22646b850adf78c2326a4b"} Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.376133 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc09d69a9d593c605f3c808232aa39dd747fa963bc22646b850adf78c2326a4b" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.408011 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.423748 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-98tqv"] Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.625086 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-98tqv"] Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.737435 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-xbj6x"] Nov 22 10:58:58 crc kubenswrapper[4938]: E1122 10:58:58.738193 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146" containerName="neutron-db-sync" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.738210 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146" containerName="neutron-db-sync" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.748309 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146" containerName="neutron-db-sync" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.761434 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.826014 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-xbj6x"] Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.847436 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-798b4b7d-4sxkg"] Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.848930 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.853436 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-mvrv7" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.853707 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.853901 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.854015 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.871081 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-798b4b7d-4sxkg"] Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.918509 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9zkl\" (UniqueName: \"kubernetes.io/projected/eb445e4c-0293-4b98-8adb-0d7b9613b932-kube-api-access-m9zkl\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.918570 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xmft\" (UniqueName: \"kubernetes.io/projected/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-kube-api-access-9xmft\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.918595 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-config\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.918646 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.918679 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-combined-ca-bundle\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.918696 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-ovndb-tls-certs\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.918724 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.918743 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-config\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.918760 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.918777 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-httpd-config\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:58 crc kubenswrapper[4938]: I1122 10:58:58.918797 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-dns-svc\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.020838 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-combined-ca-bundle\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.021169 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-ovndb-tls-certs\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.021213 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.021235 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-config\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.021253 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.021270 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-httpd-config\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.021292 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-dns-svc\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.021348 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9zkl\" (UniqueName: \"kubernetes.io/projected/eb445e4c-0293-4b98-8adb-0d7b9613b932-kube-api-access-m9zkl\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.021377 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xmft\" (UniqueName: \"kubernetes.io/projected/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-kube-api-access-9xmft\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.021400 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-config\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.021447 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.022901 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-dns-svc\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.028382 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.031646 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-ovsdbserver-sb\") pod 
\"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.032491 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-config\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.041502 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-combined-ca-bundle\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.041506 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.046443 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-httpd-config\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.047795 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xmft\" (UniqueName: \"kubernetes.io/projected/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-kube-api-access-9xmft\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.047879 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-ovndb-tls-certs\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.070347 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-config\") pod \"neutron-798b4b7d-4sxkg\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") " pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.077280 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9zkl\" (UniqueName: \"kubernetes.io/projected/eb445e4c-0293-4b98-8adb-0d7b9613b932-kube-api-access-m9zkl\") pod \"dnsmasq-dns-6578955fd5-xbj6x\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") " pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.118811 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.190372 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.408865 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" event={"ID":"0d27fa20-5741-49e1-a69c-5f3c856bea32","Type":"ContainerStarted","Data":"cdf84fec7fa4ffa5b7582efa0e9b0e1b520445715c547ba30ca909778199ea13"} Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.408931 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" event={"ID":"0d27fa20-5741-49e1-a69c-5f3c856bea32","Type":"ContainerStarted","Data":"233ba7563bc8dfe698b3400c014a65b56701f9ce8c669060d23e23b04596de38"} Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.415379 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" event={"ID":"be4b55af-8570-431c-bd58-4a1649c0f641","Type":"ContainerStarted","Data":"7237993756da1207b595f4b3b36fefae50714ad40758045c8369d8564fc43995"} Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.415648 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" podUID="be4b55af-8570-431c-bd58-4a1649c0f641" containerName="dnsmasq-dns" containerID="cri-o://7237993756da1207b595f4b3b36fefae50714ad40758045c8369d8564fc43995" gracePeriod=10 Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.415903 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.425420 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6865f54775-4zkqs" event={"ID":"796676ca-fdb7-4ac2-9092-73d2ac3ac760","Type":"ContainerStarted","Data":"bd20f4796afc1d5cf7f71760d1af0cba98e063b33b3080bffaff48970b05c00a"} Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.425475 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6865f54775-4zkqs" event={"ID":"796676ca-fdb7-4ac2-9092-73d2ac3ac760","Type":"ContainerStarted","Data":"2dda441bf0992bada13c2af746910871e23c0d8f8bf5f021a8e2bee8ff39d58a"} Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.431193 4938 generic.go:334] "Generic (PLEG): container finished" podID="1bd35528-3a46-4ed8-8817-137c1f1e9d35" containerID="30333e0a74e81ce344627b28176a83fd53d72e1e91705c2bf758188a8b5666c8" exitCode=0 Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.431264 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" event={"ID":"1bd35528-3a46-4ed8-8817-137c1f1e9d35","Type":"ContainerDied","Data":"30333e0a74e81ce344627b28176a83fd53d72e1e91705c2bf758188a8b5666c8"} Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.431295 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" event={"ID":"1bd35528-3a46-4ed8-8817-137c1f1e9d35","Type":"ContainerStarted","Data":"bd638eea4cd25e68b433f9d7e1d335f977b95e73db457ebef4314227692abc54"} Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.453966 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"278730a8-49d3-465d-a6b3-49f52848cc46","Type":"ContainerStarted","Data":"64190bebc34fe179c5a2e37d0d68cec60bee909e7651a9c36bf781fd53f88ee5"} Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.466786 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/barbican-keystone-listener-6546d544c4-wcfbr" podStartSLOduration=3.214187336 podStartE2EDuration="5.466762021s" podCreationTimestamp="2025-11-22 10:58:54 +0000 UTC" firstStartedPulling="2025-11-22 10:58:55.502805839 +0000 UTC m=+1267.970643228" lastFinishedPulling="2025-11-22 10:58:57.755380514 +0000 UTC m=+1270.223217913" observedRunningTime="2025-11-22 10:58:59.435774825 +0000 UTC m=+1271.903612234" watchObservedRunningTime="2025-11-22 10:58:59.466762021 +0000 UTC m=+1271.934599420" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.473626 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6865f54775-4zkqs" podStartSLOduration=3.129908705 podStartE2EDuration="5.473604292s" podCreationTimestamp="2025-11-22 10:58:54 +0000 UTC" firstStartedPulling="2025-11-22 10:58:55.396369163 +0000 UTC m=+1267.864206562" lastFinishedPulling="2025-11-22 10:58:57.74006475 +0000 UTC m=+1270.207902149" observedRunningTime="2025-11-22 10:58:59.463664243 +0000 UTC m=+1271.931501642" watchObservedRunningTime="2025-11-22 10:58:59.473604292 +0000 UTC m=+1271.941441691" Nov 22 10:58:59 crc kubenswrapper[4938]: I1122 10:58:59.524826 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" podStartSLOduration=5.524806185 podStartE2EDuration="5.524806185s" podCreationTimestamp="2025-11-22 10:58:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:59.521663396 +0000 UTC m=+1271.989500795" watchObservedRunningTime="2025-11-22 10:58:59.524806185 +0000 UTC m=+1271.992643594" Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.424936 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-xbj6x"] Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.501256 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"134cb80c-67f8-45ed-b602-68bce2f35109","Type":"ContainerStarted","Data":"9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45"} Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.512671 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" event={"ID":"eb445e4c-0293-4b98-8adb-0d7b9613b932","Type":"ContainerStarted","Data":"c492513bd5f2404871c8752e9e2acc93316f8ea863f51651247a66eb73567bd4"} Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.516020 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-798b4b7d-4sxkg"] Nov 22 10:59:00 crc kubenswrapper[4938]: W1122 10:59:00.522098 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78c4b6bb_4906_4866_b45d_1ad4d3a95ce4.slice/crio-ba64b361a104eccb7bef3c959487d66452b4827dea31a49c3d4b019fd06e7abb WatchSource:0}: Error finding container ba64b361a104eccb7bef3c959487d66452b4827dea31a49c3d4b019fd06e7abb: Status 404 returned error can't find the container with id ba64b361a104eccb7bef3c959487d66452b4827dea31a49c3d4b019fd06e7abb Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.524125 4938 generic.go:334] "Generic (PLEG): container finished" podID="be4b55af-8570-431c-bd58-4a1649c0f641" containerID="7237993756da1207b595f4b3b36fefae50714ad40758045c8369d8564fc43995" exitCode=0 Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.524187 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" event={"ID":"be4b55af-8570-431c-bd58-4a1649c0f641","Type":"ContainerDied","Data":"7237993756da1207b595f4b3b36fefae50714ad40758045c8369d8564fc43995"} Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.524209 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" event={"ID":"be4b55af-8570-431c-bd58-4a1649c0f641","Type":"ContainerDied","Data":"5c1e54a0e6fc8dd3e25a6838487a07f9199e349155b6b27c226f4c8129d6e887"} Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.524219 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c1e54a0e6fc8dd3e25a6838487a07f9199e349155b6b27c226f4c8129d6e887" Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.543067 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" event={"ID":"1bd35528-3a46-4ed8-8817-137c1f1e9d35","Type":"ContainerDied","Data":"bd638eea4cd25e68b433f9d7e1d335f977b95e73db457ebef4314227692abc54"} Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.543103 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd638eea4cd25e68b433f9d7e1d335f977b95e73db457ebef4314227692abc54" Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.679691 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.753211 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5d64bdcd96-8b525" Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.761891 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.787829 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.886421 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-ovsdbserver-sb\") pod \"be4b55af-8570-431c-bd58-4a1649c0f641\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.886724 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-config\") pod \"be4b55af-8570-431c-bd58-4a1649c0f641\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.886850 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5rql\" (UniqueName: \"kubernetes.io/projected/1bd35528-3a46-4ed8-8817-137c1f1e9d35-kube-api-access-b5rql\") pod \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.886948 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5srvr\" (UniqueName: \"kubernetes.io/projected/be4b55af-8570-431c-bd58-4a1649c0f641-kube-api-access-5srvr\") pod \"be4b55af-8570-431c-bd58-4a1649c0f641\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.887036 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-config\") pod \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.887125 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-dns-swift-storage-0\") pod \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.887206 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-ovsdbserver-nb\") pod \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.887336 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-dns-svc\") pod \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.887444 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-dns-swift-storage-0\") pod \"be4b55af-8570-431c-bd58-4a1649c0f641\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.887567 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-dns-svc\") pod \"be4b55af-8570-431c-bd58-4a1649c0f641\" (UID: 
\"be4b55af-8570-431c-bd58-4a1649c0f641\") " Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.887667 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-ovsdbserver-nb\") pod \"be4b55af-8570-431c-bd58-4a1649c0f641\" (UID: \"be4b55af-8570-431c-bd58-4a1649c0f641\") " Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.887743 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-ovsdbserver-sb\") pod \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\" (UID: \"1bd35528-3a46-4ed8-8817-137c1f1e9d35\") " Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.944124 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be4b55af-8570-431c-bd58-4a1649c0f641-kube-api-access-5srvr" (OuterVolumeSpecName: "kube-api-access-5srvr") pod "be4b55af-8570-431c-bd58-4a1649c0f641" (UID: "be4b55af-8570-431c-bd58-4a1649c0f641"). InnerVolumeSpecName "kube-api-access-5srvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.945442 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bd35528-3a46-4ed8-8817-137c1f1e9d35-kube-api-access-b5rql" (OuterVolumeSpecName: "kube-api-access-b5rql") pod "1bd35528-3a46-4ed8-8817-137c1f1e9d35" (UID: "1bd35528-3a46-4ed8-8817-137c1f1e9d35"). InnerVolumeSpecName "kube-api-access-b5rql". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.959801 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1bd35528-3a46-4ed8-8817-137c1f1e9d35" (UID: "1bd35528-3a46-4ed8-8817-137c1f1e9d35"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.990426 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.990458 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5rql\" (UniqueName: \"kubernetes.io/projected/1bd35528-3a46-4ed8-8817-137c1f1e9d35-kube-api-access-b5rql\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:00 crc kubenswrapper[4938]: I1122 10:59:00.990468 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5srvr\" (UniqueName: \"kubernetes.io/projected/be4b55af-8570-431c-bd58-4a1649c0f641-kube-api-access-5srvr\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.013985 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.148950 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1bd35528-3a46-4ed8-8817-137c1f1e9d35" (UID: "1bd35528-3a46-4ed8-8817-137c1f1e9d35"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.195277 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.200720 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-config" (OuterVolumeSpecName: "config") pod "1bd35528-3a46-4ed8-8817-137c1f1e9d35" (UID: "1bd35528-3a46-4ed8-8817-137c1f1e9d35"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.204263 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1bd35528-3a46-4ed8-8817-137c1f1e9d35" (UID: "1bd35528-3a46-4ed8-8817-137c1f1e9d35"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.205050 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1bd35528-3a46-4ed8-8817-137c1f1e9d35" (UID: "1bd35528-3a46-4ed8-8817-137c1f1e9d35"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.217147 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-config" (OuterVolumeSpecName: "config") pod "be4b55af-8570-431c-bd58-4a1649c0f641" (UID: "be4b55af-8570-431c-bd58-4a1649c0f641"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.225274 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "be4b55af-8570-431c-bd58-4a1649c0f641" (UID: "be4b55af-8570-431c-bd58-4a1649c0f641"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.278002 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "be4b55af-8570-431c-bd58-4a1649c0f641" (UID: "be4b55af-8570-431c-bd58-4a1649c0f641"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.281284 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "be4b55af-8570-431c-bd58-4a1649c0f641" (UID: "be4b55af-8570-431c-bd58-4a1649c0f641"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.294149 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "be4b55af-8570-431c-bd58-4a1649c0f641" (UID: "be4b55af-8570-431c-bd58-4a1649c0f641"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.296862 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.297190 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.297205 4938 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.297223 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1bd35528-3a46-4ed8-8817-137c1f1e9d35-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.297237 4938 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.297248 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.297261 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.297273 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/be4b55af-8570-431c-bd58-4a1649c0f641-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.409838 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6bf8df76c8-5c2xm" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.599432 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"134cb80c-67f8-45ed-b602-68bce2f35109","Type":"ContainerStarted","Data":"88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f"} Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.599968 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="134cb80c-67f8-45ed-b602-68bce2f35109" containerName="cinder-api-log" containerID="cri-o://9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45" gracePeriod=30 Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.600121 4938 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/cinder-api-0" podUID="134cb80c-67f8-45ed-b602-68bce2f35109" containerName="cinder-api" containerID="cri-o://88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f" gracePeriod=30 Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.600585 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.620976 4938 generic.go:334] "Generic (PLEG): container finished" podID="eb445e4c-0293-4b98-8adb-0d7b9613b932" containerID="5f6dfdec8282cd2c206959eb4f96f4852f73990c685307b5272bbfdb06231470" exitCode=0 Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.621258 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" event={"ID":"eb445e4c-0293-4b98-8adb-0d7b9613b932","Type":"ContainerDied","Data":"5f6dfdec8282cd2c206959eb4f96f4852f73990c685307b5272bbfdb06231470"} Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.628152 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.62813419 podStartE2EDuration="5.62813419s" podCreationTimestamp="2025-11-22 10:58:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:01.624421837 +0000 UTC m=+1274.092259236" watchObservedRunningTime="2025-11-22 10:59:01.62813419 +0000 UTC m=+1274.095971589" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.655297 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-798b4b7d-4sxkg" event={"ID":"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4","Type":"ContainerStarted","Data":"49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa"} Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.655337 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-798b4b7d-4sxkg" event={"ID":"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4","Type":"ContainerStarted","Data":"d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e"} Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.655349 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-798b4b7d-4sxkg" event={"ID":"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4","Type":"ContainerStarted","Data":"ba64b361a104eccb7bef3c959487d66452b4827dea31a49c3d4b019fd06e7abb"} Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.655381 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.666336 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"278730a8-49d3-465d-a6b3-49f52848cc46","Type":"ContainerStarted","Data":"3078b352839c9f2f1963d0c8a76c468373dea2d26127078f1e5e4199a6e0074a"} Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.666365 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc8b5d5c5-98tqv" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.667870 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-f8v57" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.679898 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-798b4b7d-4sxkg" podStartSLOduration=3.679879296 podStartE2EDuration="3.679879296s" podCreationTimestamp="2025-11-22 10:58:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:01.6724298 +0000 UTC m=+1274.140267199" watchObservedRunningTime="2025-11-22 10:59:01.679879296 +0000 UTC m=+1274.147716695" Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.839023 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-98tqv"] Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.858629 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-98tqv"] Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.870975 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-f8v57"] Nov 22 10:59:01 crc kubenswrapper[4938]: I1122 10:59:01.884095 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-f8v57"] Nov 22 10:59:02 crc kubenswrapper[4938]: I1122 10:59:02.460290 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bd35528-3a46-4ed8-8817-137c1f1e9d35" path="/var/lib/kubelet/pods/1bd35528-3a46-4ed8-8817-137c1f1e9d35/volumes" Nov 22 10:59:02 crc kubenswrapper[4938]: I1122 10:59:02.461083 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be4b55af-8570-431c-bd58-4a1649c0f641" path="/var/lib/kubelet/pods/be4b55af-8570-431c-bd58-4a1649c0f641/volumes" Nov 22 10:59:02 crc kubenswrapper[4938]: I1122 10:59:02.676924 4938 generic.go:334] "Generic (PLEG): container finished" podID="134cb80c-67f8-45ed-b602-68bce2f35109" containerID="9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45" exitCode=143 Nov 22 10:59:02 crc kubenswrapper[4938]: I1122 10:59:02.676959 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"134cb80c-67f8-45ed-b602-68bce2f35109","Type":"ContainerDied","Data":"9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45"} Nov 22 10:59:02 crc kubenswrapper[4938]: I1122 10:59:02.679819 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" event={"ID":"eb445e4c-0293-4b98-8adb-0d7b9613b932","Type":"ContainerStarted","Data":"39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499"} Nov 22 10:59:02 crc kubenswrapper[4938]: I1122 10:59:02.679993 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:59:02 crc kubenswrapper[4938]: I1122 10:59:02.682236 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"278730a8-49d3-465d-a6b3-49f52848cc46","Type":"ContainerStarted","Data":"596c968b22cb20e10118ac04c47d10b22d78612800d0cab62a22fe6e3cdc8363"} Nov 22 10:59:02 crc kubenswrapper[4938]: I1122 10:59:02.701130 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" podStartSLOduration=4.701109658 podStartE2EDuration="4.701109658s" podCreationTimestamp="2025-11-22 10:58:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-22 10:59:02.69639987 +0000 UTC m=+1275.164237269" watchObservedRunningTime="2025-11-22 10:59:02.701109658 +0000 UTC m=+1275.168947057" Nov 22 10:59:02 crc kubenswrapper[4938]: I1122 10:59:02.721609 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.800754756 podStartE2EDuration="6.721591801s" podCreationTimestamp="2025-11-22 10:58:56 +0000 UTC" firstStartedPulling="2025-11-22 10:58:58.451051309 +0000 UTC m=+1270.918888708" lastFinishedPulling="2025-11-22 10:59:00.371888354 +0000 UTC m=+1272.839725753" observedRunningTime="2025-11-22 10:59:02.720841102 +0000 UTC m=+1275.188678501" watchObservedRunningTime="2025-11-22 10:59:02.721591801 +0000 UTC m=+1275.189429190" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.673464 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 22 10:59:04 crc kubenswrapper[4938]: E1122 10:59:04.674142 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be4b55af-8570-431c-bd58-4a1649c0f641" containerName="init" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.674156 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="be4b55af-8570-431c-bd58-4a1649c0f641" containerName="init" Nov 22 10:59:04 crc kubenswrapper[4938]: E1122 10:59:04.674187 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bd35528-3a46-4ed8-8817-137c1f1e9d35" containerName="init" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.674194 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bd35528-3a46-4ed8-8817-137c1f1e9d35" containerName="init" Nov 22 10:59:04 crc kubenswrapper[4938]: E1122 10:59:04.674207 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be4b55af-8570-431c-bd58-4a1649c0f641" containerName="dnsmasq-dns" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.674213 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="be4b55af-8570-431c-bd58-4a1649c0f641" containerName="dnsmasq-dns" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.674410 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="be4b55af-8570-431c-bd58-4a1649c0f641" containerName="dnsmasq-dns" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.674422 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bd35528-3a46-4ed8-8817-137c1f1e9d35" containerName="init" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.683428 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.683556 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.686825 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.686863 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-5b4ln" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.687343 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.779288 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/cc16df53-2254-4dc7-8914-88afcbc0b5c4-openstack-config-secret\") pod \"openstackclient\" (UID: \"cc16df53-2254-4dc7-8914-88afcbc0b5c4\") " pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.779383 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/cc16df53-2254-4dc7-8914-88afcbc0b5c4-openstack-config\") pod \"openstackclient\" (UID: \"cc16df53-2254-4dc7-8914-88afcbc0b5c4\") " pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.779458 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2xr2\" (UniqueName: \"kubernetes.io/projected/cc16df53-2254-4dc7-8914-88afcbc0b5c4-kube-api-access-b2xr2\") pod \"openstackclient\" (UID: \"cc16df53-2254-4dc7-8914-88afcbc0b5c4\") " pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.779590 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc16df53-2254-4dc7-8914-88afcbc0b5c4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"cc16df53-2254-4dc7-8914-88afcbc0b5c4\") " pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.881685 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2xr2\" (UniqueName: \"kubernetes.io/projected/cc16df53-2254-4dc7-8914-88afcbc0b5c4-kube-api-access-b2xr2\") pod \"openstackclient\" (UID: \"cc16df53-2254-4dc7-8914-88afcbc0b5c4\") " pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.881752 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc16df53-2254-4dc7-8914-88afcbc0b5c4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"cc16df53-2254-4dc7-8914-88afcbc0b5c4\") " pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.881816 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/cc16df53-2254-4dc7-8914-88afcbc0b5c4-openstack-config-secret\") pod \"openstackclient\" (UID: \"cc16df53-2254-4dc7-8914-88afcbc0b5c4\") " pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.881842 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/cc16df53-2254-4dc7-8914-88afcbc0b5c4-openstack-config\") pod \"openstackclient\" 
(UID: \"cc16df53-2254-4dc7-8914-88afcbc0b5c4\") " pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.883486 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/cc16df53-2254-4dc7-8914-88afcbc0b5c4-openstack-config\") pod \"openstackclient\" (UID: \"cc16df53-2254-4dc7-8914-88afcbc0b5c4\") " pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.899594 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc16df53-2254-4dc7-8914-88afcbc0b5c4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"cc16df53-2254-4dc7-8914-88afcbc0b5c4\") " pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.904511 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/cc16df53-2254-4dc7-8914-88afcbc0b5c4-openstack-config-secret\") pod \"openstackclient\" (UID: \"cc16df53-2254-4dc7-8914-88afcbc0b5c4\") " pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.908406 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2xr2\" (UniqueName: \"kubernetes.io/projected/cc16df53-2254-4dc7-8914-88afcbc0b5c4-kube-api-access-b2xr2\") pod \"openstackclient\" (UID: \"cc16df53-2254-4dc7-8914-88afcbc0b5c4\") " pod="openstack/openstackclient" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.953037 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7768cc7979-hrkwz"] Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.954552 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.956613 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.957161 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 22 10:59:04 crc kubenswrapper[4938]: I1122 10:59:04.962860 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7768cc7979-hrkwz"] Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.004878 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.087170 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-httpd-config\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.087219 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bj5hd\" (UniqueName: \"kubernetes.io/projected/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-kube-api-access-bj5hd\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.087301 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-combined-ca-bundle\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.087346 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-internal-tls-certs\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.087391 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-public-tls-certs\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.087423 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-ovndb-tls-certs\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.087460 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-config\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.190383 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-combined-ca-bundle\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.190800 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-internal-tls-certs\") pod \"neutron-7768cc7979-hrkwz\" (UID: 
\"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.190862 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-public-tls-certs\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.190906 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-ovndb-tls-certs\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.190983 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-config\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.191029 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-httpd-config\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.191064 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bj5hd\" (UniqueName: \"kubernetes.io/projected/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-kube-api-access-bj5hd\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.199756 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-combined-ca-bundle\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.200031 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-public-tls-certs\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.200183 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-internal-tls-certs\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.205941 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-ovndb-tls-certs\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.214203 4938 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-httpd-config\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.214821 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-config\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.218747 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bj5hd\" (UniqueName: \"kubernetes.io/projected/c4f7f822-da01-4216-9e4c-5ee8a9aa8495-kube-api-access-bj5hd\") pod \"neutron-7768cc7979-hrkwz\" (UID: \"c4f7f822-da01-4216-9e4c-5ee8a9aa8495\") " pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.294223 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.392148 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5cbdfff8c8-z9wrl"] Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.393651 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.403324 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.416384 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5cbdfff8c8-z9wrl"] Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.416659 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.500627 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-config-data\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.504205 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-combined-ca-bundle\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.504250 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-internal-tls-certs\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.504276 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/86ceb17d-9778-45f9-a75e-ed96d5abe722-logs\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.504410 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cddtm\" (UniqueName: \"kubernetes.io/projected/86ceb17d-9778-45f9-a75e-ed96d5abe722-kube-api-access-cddtm\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.504501 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-public-tls-certs\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.504572 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-config-data-custom\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.554152 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.605807 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-config-data-custom\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.605979 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-config-data\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.606209 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-combined-ca-bundle\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.606235 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-internal-tls-certs\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.606253 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/86ceb17d-9778-45f9-a75e-ed96d5abe722-logs\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 
10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.606278 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cddtm\" (UniqueName: \"kubernetes.io/projected/86ceb17d-9778-45f9-a75e-ed96d5abe722-kube-api-access-cddtm\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.606299 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-public-tls-certs\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.612235 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/86ceb17d-9778-45f9-a75e-ed96d5abe722-logs\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.615161 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-combined-ca-bundle\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.615606 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-config-data-custom\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.621061 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-public-tls-certs\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.623486 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-internal-tls-certs\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.624856 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/86ceb17d-9778-45f9-a75e-ed96d5abe722-config-data\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.635494 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cddtm\" (UniqueName: \"kubernetes.io/projected/86ceb17d-9778-45f9-a75e-ed96d5abe722-kube-api-access-cddtm\") pod \"barbican-api-5cbdfff8c8-z9wrl\" (UID: \"86ceb17d-9778-45f9-a75e-ed96d5abe722\") " pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.707853 4938 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"cc16df53-2254-4dc7-8914-88afcbc0b5c4","Type":"ContainerStarted","Data":"ba89dfb6be5d5aaa663f6158ca06b1b8f8aaea6f049fdec5b530daef2d09af61"} Nov 22 10:59:05 crc kubenswrapper[4938]: I1122 10:59:05.731418 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:06 crc kubenswrapper[4938]: I1122 10:59:06.005936 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7768cc7979-hrkwz"] Nov 22 10:59:06 crc kubenswrapper[4938]: I1122 10:59:06.292867 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5cbdfff8c8-z9wrl"] Nov 22 10:59:06 crc kubenswrapper[4938]: I1122 10:59:06.724528 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5cbdfff8c8-z9wrl" event={"ID":"86ceb17d-9778-45f9-a75e-ed96d5abe722","Type":"ContainerStarted","Data":"1a7bab0ed535506134130f9da8f2f1cef8b36ac5b95a2e135ce2d9fc5e89ca40"} Nov 22 10:59:06 crc kubenswrapper[4938]: I1122 10:59:06.724894 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5cbdfff8c8-z9wrl" event={"ID":"86ceb17d-9778-45f9-a75e-ed96d5abe722","Type":"ContainerStarted","Data":"37fb852330e80253399b5afed3d492116696208a1c688f3c650c3c305ad0ce72"} Nov 22 10:59:06 crc kubenswrapper[4938]: I1122 10:59:06.728545 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7768cc7979-hrkwz" event={"ID":"c4f7f822-da01-4216-9e4c-5ee8a9aa8495","Type":"ContainerStarted","Data":"505b7a082084ecdecc0938bd5b3924931cf5837fb8d23621347c3cf087fe5f95"} Nov 22 10:59:06 crc kubenswrapper[4938]: I1122 10:59:06.728602 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7768cc7979-hrkwz" event={"ID":"c4f7f822-da01-4216-9e4c-5ee8a9aa8495","Type":"ContainerStarted","Data":"3d63a433463abb78a24b1b78caba1b780a489c0d2e4073b1558c0e3aedf205a9"} Nov 22 10:59:06 crc kubenswrapper[4938]: I1122 10:59:06.728613 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7768cc7979-hrkwz" event={"ID":"c4f7f822-da01-4216-9e4c-5ee8a9aa8495","Type":"ContainerStarted","Data":"ad9d0a332c23f2d2bdc64d7ff1c896f84adc23f0ca68d40f9c3fdd783a19ae7f"} Nov 22 10:59:06 crc kubenswrapper[4938]: I1122 10:59:06.730495 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7768cc7979-hrkwz" Nov 22 10:59:06 crc kubenswrapper[4938]: I1122 10:59:06.766955 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7768cc7979-hrkwz" podStartSLOduration=2.7669342009999998 podStartE2EDuration="2.766934201s" podCreationTimestamp="2025-11-22 10:59:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:06.757858614 +0000 UTC m=+1279.225696013" watchObservedRunningTime="2025-11-22 10:59:06.766934201 +0000 UTC m=+1279.234771600" Nov 22 10:59:06 crc kubenswrapper[4938]: I1122 10:59:06.908033 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 22 10:59:07 crc kubenswrapper[4938]: I1122 10:59:07.206520 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 22 10:59:07 crc kubenswrapper[4938]: I1122 10:59:07.223727 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:59:07 crc kubenswrapper[4938]: I1122 10:59:07.747074 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5cbdfff8c8-z9wrl" event={"ID":"86ceb17d-9778-45f9-a75e-ed96d5abe722","Type":"ContainerStarted","Data":"2bbf09355d534b69a40e2e4d5275990137e71a8fbf342203595acd631360d2e7"} Nov 22 10:59:07 crc kubenswrapper[4938]: I1122 10:59:07.748754 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:07 crc kubenswrapper[4938]: I1122 10:59:07.749029 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:07 crc kubenswrapper[4938]: I1122 10:59:07.813983 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:59:07 crc kubenswrapper[4938]: I1122 10:59:07.836861 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5cbdfff8c8-z9wrl" podStartSLOduration=2.836840181 podStartE2EDuration="2.836840181s" podCreationTimestamp="2025-11-22 10:59:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:07.797331042 +0000 UTC m=+1280.265168441" watchObservedRunningTime="2025-11-22 10:59:07.836840181 +0000 UTC m=+1280.304677580" Nov 22 10:59:07 crc kubenswrapper[4938]: I1122 10:59:07.888623 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:59:08 crc kubenswrapper[4938]: I1122 10:59:08.761245 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="278730a8-49d3-465d-a6b3-49f52848cc46" containerName="cinder-scheduler" containerID="cri-o://3078b352839c9f2f1963d0c8a76c468373dea2d26127078f1e5e4199a6e0074a" gracePeriod=30 Nov 22 10:59:08 crc kubenswrapper[4938]: I1122 10:59:08.761823 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="278730a8-49d3-465d-a6b3-49f52848cc46" containerName="probe" containerID="cri-o://596c968b22cb20e10118ac04c47d10b22d78612800d0cab62a22fe6e3cdc8363" gracePeriod=30 Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.120103 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.182388 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-mf78l"] Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.182620 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" podUID="a071ebb7-d74e-4b58-b01d-e20eaf91150e" containerName="dnsmasq-dns" containerID="cri-o://57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716" gracePeriod=10 Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.770665 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.778062 4938 generic.go:334] "Generic (PLEG): container finished" podID="a071ebb7-d74e-4b58-b01d-e20eaf91150e" containerID="57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716" exitCode=0 Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.778104 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" event={"ID":"a071ebb7-d74e-4b58-b01d-e20eaf91150e","Type":"ContainerDied","Data":"57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716"} Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.778107 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.778128 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-mf78l" event={"ID":"a071ebb7-d74e-4b58-b01d-e20eaf91150e","Type":"ContainerDied","Data":"303e28e9c6fbe9ffce079bd051d6d96612aba4640bf2555235b7a6a12ffb5878"} Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.778145 4938 scope.go:117] "RemoveContainer" containerID="57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716" Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.820244 4938 scope.go:117] "RemoveContainer" containerID="19e49d629c963760cfb9528147cb5832258b58cada7d29c23167c9df7a0842b6" Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.864255 4938 scope.go:117] "RemoveContainer" containerID="57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716" Nov 22 10:59:09 crc kubenswrapper[4938]: E1122 10:59:09.865197 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716\": container with ID starting with 57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716 not found: ID does not exist" containerID="57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716" Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.865229 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716"} err="failed to get container status \"57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716\": rpc error: code = NotFound desc = could not find container \"57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716\": container with ID starting with 57a9f08db3e712f00f496e503266635df467498a9a28625078aba2e1777e5716 not found: ID does not exist" Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.865254 4938 scope.go:117] "RemoveContainer" containerID="19e49d629c963760cfb9528147cb5832258b58cada7d29c23167c9df7a0842b6" Nov 22 10:59:09 crc kubenswrapper[4938]: E1122 10:59:09.865697 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19e49d629c963760cfb9528147cb5832258b58cada7d29c23167c9df7a0842b6\": container with ID starting with 19e49d629c963760cfb9528147cb5832258b58cada7d29c23167c9df7a0842b6 not found: ID does not exist" containerID="19e49d629c963760cfb9528147cb5832258b58cada7d29c23167c9df7a0842b6" Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.865721 4938 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"19e49d629c963760cfb9528147cb5832258b58cada7d29c23167c9df7a0842b6"} err="failed to get container status \"19e49d629c963760cfb9528147cb5832258b58cada7d29c23167c9df7a0842b6\": rpc error: code = NotFound desc = could not find container \"19e49d629c963760cfb9528147cb5832258b58cada7d29c23167c9df7a0842b6\": container with ID starting with 19e49d629c963760cfb9528147cb5832258b58cada7d29c23167c9df7a0842b6 not found: ID does not exist" Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.955259 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-ovsdbserver-sb\") pod \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.955330 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-dns-svc\") pod \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.955427 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-ovsdbserver-nb\") pod \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.955453 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-dns-swift-storage-0\") pod \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.955485 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q82jp\" (UniqueName: \"kubernetes.io/projected/a071ebb7-d74e-4b58-b01d-e20eaf91150e-kube-api-access-q82jp\") pod \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.955597 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-config\") pod \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\" (UID: \"a071ebb7-d74e-4b58-b01d-e20eaf91150e\") " Nov 22 10:59:09 crc kubenswrapper[4938]: I1122 10:59:09.965285 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a071ebb7-d74e-4b58-b01d-e20eaf91150e-kube-api-access-q82jp" (OuterVolumeSpecName: "kube-api-access-q82jp") pod "a071ebb7-d74e-4b58-b01d-e20eaf91150e" (UID: "a071ebb7-d74e-4b58-b01d-e20eaf91150e"). InnerVolumeSpecName "kube-api-access-q82jp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.026126 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a071ebb7-d74e-4b58-b01d-e20eaf91150e" (UID: "a071ebb7-d74e-4b58-b01d-e20eaf91150e"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.035243 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a071ebb7-d74e-4b58-b01d-e20eaf91150e" (UID: "a071ebb7-d74e-4b58-b01d-e20eaf91150e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.046337 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a071ebb7-d74e-4b58-b01d-e20eaf91150e" (UID: "a071ebb7-d74e-4b58-b01d-e20eaf91150e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.048925 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a071ebb7-d74e-4b58-b01d-e20eaf91150e" (UID: "a071ebb7-d74e-4b58-b01d-e20eaf91150e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.058035 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.058079 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.058097 4938 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.058112 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q82jp\" (UniqueName: \"kubernetes.io/projected/a071ebb7-d74e-4b58-b01d-e20eaf91150e-kube-api-access-q82jp\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.058124 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.064367 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-config" (OuterVolumeSpecName: "config") pod "a071ebb7-d74e-4b58-b01d-e20eaf91150e" (UID: "a071ebb7-d74e-4b58-b01d-e20eaf91150e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.130837 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-mf78l"] Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.144581 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-mf78l"] Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.160120 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a071ebb7-d74e-4b58-b01d-e20eaf91150e-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.314013 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.459414 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a071ebb7-d74e-4b58-b01d-e20eaf91150e" path="/var/lib/kubelet/pods/a071ebb7-d74e-4b58-b01d-e20eaf91150e/volumes" Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.788793 4938 generic.go:334] "Generic (PLEG): container finished" podID="278730a8-49d3-465d-a6b3-49f52848cc46" containerID="596c968b22cb20e10118ac04c47d10b22d78612800d0cab62a22fe6e3cdc8363" exitCode=0 Nov 22 10:59:10 crc kubenswrapper[4938]: I1122 10:59:10.788844 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"278730a8-49d3-465d-a6b3-49f52848cc46","Type":"ContainerDied","Data":"596c968b22cb20e10118ac04c47d10b22d78612800d0cab62a22fe6e3cdc8363"} Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.069620 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-77b7545b4c-scc96"] Nov 22 10:59:11 crc kubenswrapper[4938]: E1122 10:59:11.070355 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a071ebb7-d74e-4b58-b01d-e20eaf91150e" containerName="init" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.070377 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="a071ebb7-d74e-4b58-b01d-e20eaf91150e" containerName="init" Nov 22 10:59:11 crc kubenswrapper[4938]: E1122 10:59:11.070419 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a071ebb7-d74e-4b58-b01d-e20eaf91150e" containerName="dnsmasq-dns" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.070425 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="a071ebb7-d74e-4b58-b01d-e20eaf91150e" containerName="dnsmasq-dns" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.070612 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="a071ebb7-d74e-4b58-b01d-e20eaf91150e" containerName="dnsmasq-dns" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.071553 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.076339 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.076405 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.076529 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.081797 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-77b7545b4c-scc96"] Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.178699 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d1ea1eda-81bc-455d-9f0d-68324fbe5992-etc-swift\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.178766 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1ea1eda-81bc-455d-9f0d-68324fbe5992-run-httpd\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.178799 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1ea1eda-81bc-455d-9f0d-68324fbe5992-log-httpd\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.178826 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1ea1eda-81bc-455d-9f0d-68324fbe5992-public-tls-certs\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.178851 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1ea1eda-81bc-455d-9f0d-68324fbe5992-internal-tls-certs\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.178947 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1ea1eda-81bc-455d-9f0d-68324fbe5992-config-data\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.178973 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7pd9\" (UniqueName: \"kubernetes.io/projected/d1ea1eda-81bc-455d-9f0d-68324fbe5992-kube-api-access-w7pd9\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " 
pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.179010 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ea1eda-81bc-455d-9f0d-68324fbe5992-combined-ca-bundle\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.280646 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1ea1eda-81bc-455d-9f0d-68324fbe5992-config-data\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.280712 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7pd9\" (UniqueName: \"kubernetes.io/projected/d1ea1eda-81bc-455d-9f0d-68324fbe5992-kube-api-access-w7pd9\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.280769 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ea1eda-81bc-455d-9f0d-68324fbe5992-combined-ca-bundle\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.280811 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d1ea1eda-81bc-455d-9f0d-68324fbe5992-etc-swift\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.280847 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1ea1eda-81bc-455d-9f0d-68324fbe5992-run-httpd\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.280884 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1ea1eda-81bc-455d-9f0d-68324fbe5992-log-httpd\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.280940 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1ea1eda-81bc-455d-9f0d-68324fbe5992-public-tls-certs\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.280974 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1ea1eda-81bc-455d-9f0d-68324fbe5992-internal-tls-certs\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " 
pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.282171 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1ea1eda-81bc-455d-9f0d-68324fbe5992-log-httpd\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.282676 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d1ea1eda-81bc-455d-9f0d-68324fbe5992-run-httpd\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.285350 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1ea1eda-81bc-455d-9f0d-68324fbe5992-public-tls-certs\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.286234 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1ea1eda-81bc-455d-9f0d-68324fbe5992-internal-tls-certs\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.286368 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ea1eda-81bc-455d-9f0d-68324fbe5992-combined-ca-bundle\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.287036 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1ea1eda-81bc-455d-9f0d-68324fbe5992-config-data\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.300159 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d1ea1eda-81bc-455d-9f0d-68324fbe5992-etc-swift\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.307785 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7pd9\" (UniqueName: \"kubernetes.io/projected/d1ea1eda-81bc-455d-9f0d-68324fbe5992-kube-api-access-w7pd9\") pod \"swift-proxy-77b7545b4c-scc96\" (UID: \"d1ea1eda-81bc-455d-9f0d-68324fbe5992\") " pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.399691 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:11 crc kubenswrapper[4938]: I1122 10:59:11.981001 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-77b7545b4c-scc96"] Nov 22 10:59:12 crc kubenswrapper[4938]: I1122 10:59:12.810684 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-77b7545b4c-scc96" event={"ID":"d1ea1eda-81bc-455d-9f0d-68324fbe5992","Type":"ContainerStarted","Data":"e2b0eb44b0dd0f869b2cb781cbdcb97a9ed86a4946a74a74c1c26d3c1716659c"} Nov 22 10:59:14 crc kubenswrapper[4938]: I1122 10:59:14.844303 4938 generic.go:334] "Generic (PLEG): container finished" podID="55be79ce-3816-4644-9e33-3762615249e3" containerID="2d2f992d56e048a9a6b64125a55a634ae075ae2da25c37cfa892307d7c8fb3e1" exitCode=137 Nov 22 10:59:14 crc kubenswrapper[4938]: I1122 10:59:14.844383 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84db767bd9-qw4kz" event={"ID":"55be79ce-3816-4644-9e33-3762615249e3","Type":"ContainerDied","Data":"2d2f992d56e048a9a6b64125a55a634ae075ae2da25c37cfa892307d7c8fb3e1"} Nov 22 10:59:14 crc kubenswrapper[4938]: I1122 10:59:14.875393 4938 generic.go:334] "Generic (PLEG): container finished" podID="278730a8-49d3-465d-a6b3-49f52848cc46" containerID="3078b352839c9f2f1963d0c8a76c468373dea2d26127078f1e5e4199a6e0074a" exitCode=0 Nov 22 10:59:14 crc kubenswrapper[4938]: I1122 10:59:14.875498 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"278730a8-49d3-465d-a6b3-49f52848cc46","Type":"ContainerDied","Data":"3078b352839c9f2f1963d0c8a76c468373dea2d26127078f1e5e4199a6e0074a"} Nov 22 10:59:14 crc kubenswrapper[4938]: I1122 10:59:14.878120 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-77b7545b4c-scc96" event={"ID":"d1ea1eda-81bc-455d-9f0d-68324fbe5992","Type":"ContainerStarted","Data":"64f8f99e13a04ab5c24723aa4994a662c3eff50a4e617622cd5cdac7e4914a34"} Nov 22 10:59:15 crc kubenswrapper[4938]: I1122 10:59:15.889678 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-77b7545b4c-scc96" event={"ID":"d1ea1eda-81bc-455d-9f0d-68324fbe5992","Type":"ContainerStarted","Data":"774e599802b2805b81872e788046e79dc804a425bc700f5eb22a688cb9539a4f"} Nov 22 10:59:15 crc kubenswrapper[4938]: I1122 10:59:15.890158 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:15 crc kubenswrapper[4938]: I1122 10:59:15.890176 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:15 crc kubenswrapper[4938]: I1122 10:59:15.917085 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-77b7545b4c-scc96" podStartSLOduration=4.917063019 podStartE2EDuration="4.917063019s" podCreationTimestamp="2025-11-22 10:59:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:15.907609603 +0000 UTC m=+1288.375447012" watchObservedRunningTime="2025-11-22 10:59:15.917063019 +0000 UTC m=+1288.384900428" Nov 22 10:59:16 crc kubenswrapper[4938]: I1122 10:59:16.523641 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:16 crc kubenswrapper[4938]: I1122 10:59:16.523932 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
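The podStartSLOduration figures in this log are watchObservedRunningTime minus podCreationTimestamp; when images were actually pulled (openstackclient further down), the firstStartedPulling -> lastFinishedPulling window is excluded as well, which is why its SLO duration (~1.944s) is much smaller than its E2E duration (~18.028s). Checking the swift-proxy numbers from the record above:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the swift-proxy startup-latency record.
	layout := "2006-01-02 15:04:05 -0700 MST" // time.Parse also accepts fractional seconds
	created, err := time.Parse(layout, "2025-11-22 10:59:11 +0000 UTC")
	if err != nil {
		panic(err)
	}
	observed, err := time.Parse(layout, "2025-11-22 10:59:15.917063019 +0000 UTC")
	if err != nil {
		panic(err)
	}
	fmt.Println(observed.Sub(created).Seconds()) // 4.917063019, matching podStartSLOduration
}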
podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="ceilometer-central-agent" containerID="cri-o://c9ab59bc6f8122d1a745c1e14123d3595e9efcc4b08fb7eea8af7ad85061f75d" gracePeriod=30 Nov 22 10:59:16 crc kubenswrapper[4938]: I1122 10:59:16.525031 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="proxy-httpd" containerID="cri-o://4e7803c1f3f27e0b753d33402a9fcf9c26ef0b2cabbeb21a566a734c28b67556" gracePeriod=30 Nov 22 10:59:16 crc kubenswrapper[4938]: I1122 10:59:16.525459 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="sg-core" containerID="cri-o://2c28d27fbd9e0a4b8deea8ae9420b5fd5d5b141cd99c146a17e89bbfcbcb8d97" gracePeriod=30 Nov 22 10:59:16 crc kubenswrapper[4938]: I1122 10:59:16.528352 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="ceilometer-notification-agent" containerID="cri-o://806e9b3c1459f2976be2e86e8e13e39d6942530ce51145e93a9031cb96c9ab2b" gracePeriod=30 Nov 22 10:59:16 crc kubenswrapper[4938]: I1122 10:59:16.533348 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 22 10:59:16 crc kubenswrapper[4938]: I1122 10:59:16.901426 4938 generic.go:334] "Generic (PLEG): container finished" podID="12962edb-6a81-4491-8ae2-85efe7102284" containerID="4e7803c1f3f27e0b753d33402a9fcf9c26ef0b2cabbeb21a566a734c28b67556" exitCode=0 Nov 22 10:59:16 crc kubenswrapper[4938]: I1122 10:59:16.901732 4938 generic.go:334] "Generic (PLEG): container finished" podID="12962edb-6a81-4491-8ae2-85efe7102284" containerID="2c28d27fbd9e0a4b8deea8ae9420b5fd5d5b141cd99c146a17e89bbfcbcb8d97" exitCode=2 Nov 22 10:59:16 crc kubenswrapper[4938]: I1122 10:59:16.901485 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12962edb-6a81-4491-8ae2-85efe7102284","Type":"ContainerDied","Data":"4e7803c1f3f27e0b753d33402a9fcf9c26ef0b2cabbeb21a566a734c28b67556"} Nov 22 10:59:16 crc kubenswrapper[4938]: I1122 10:59:16.901783 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12962edb-6a81-4491-8ae2-85efe7102284","Type":"ContainerDied","Data":"2c28d27fbd9e0a4b8deea8ae9420b5fd5d5b141cd99c146a17e89bbfcbcb8d97"} Nov 22 10:59:17 crc kubenswrapper[4938]: I1122 10:59:17.513385 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:17 crc kubenswrapper[4938]: I1122 10:59:17.710764 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5cbdfff8c8-z9wrl" Nov 22 10:59:17 crc kubenswrapper[4938]: I1122 10:59:17.799084 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-84958d7694-crwst"] Nov 22 10:59:17 crc kubenswrapper[4938]: I1122 10:59:17.799351 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-84958d7694-crwst" podUID="03728744-ec67-4880-9913-40bd5e011e60" containerName="barbican-api-log" containerID="cri-o://9da44cf998ea3acc76355c2a0080d603778a5d70020ca99ff26fb7040e78cab6" gracePeriod=30 Nov 22 10:59:17 crc kubenswrapper[4938]: I1122 10:59:17.799875 4938 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/barbican-api-84958d7694-crwst" podUID="03728744-ec67-4880-9913-40bd5e011e60" containerName="barbican-api" containerID="cri-o://ca1d6435f4c16f63e13af258476bbcd307e59a65ef20abff44aec9b14117ef8a" gracePeriod=30 Nov 22 10:59:17 crc kubenswrapper[4938]: I1122 10:59:17.918934 4938 generic.go:334] "Generic (PLEG): container finished" podID="12962edb-6a81-4491-8ae2-85efe7102284" containerID="c9ab59bc6f8122d1a745c1e14123d3595e9efcc4b08fb7eea8af7ad85061f75d" exitCode=0 Nov 22 10:59:17 crc kubenswrapper[4938]: I1122 10:59:17.919498 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12962edb-6a81-4491-8ae2-85efe7102284","Type":"ContainerDied","Data":"c9ab59bc6f8122d1a745c1e14123d3595e9efcc4b08fb7eea8af7ad85061f75d"} Nov 22 10:59:18 crc kubenswrapper[4938]: I1122 10:59:18.932844 4938 generic.go:334] "Generic (PLEG): container finished" podID="03728744-ec67-4880-9913-40bd5e011e60" containerID="9da44cf998ea3acc76355c2a0080d603778a5d70020ca99ff26fb7040e78cab6" exitCode=143 Nov 22 10:59:18 crc kubenswrapper[4938]: I1122 10:59:18.932888 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84958d7694-crwst" event={"ID":"03728744-ec67-4880-9913-40bd5e011e60","Type":"ContainerDied","Data":"9da44cf998ea3acc76355c2a0080d603778a5d70020ca99ff26fb7040e78cab6"} Nov 22 10:59:19 crc kubenswrapper[4938]: I1122 10:59:19.604403 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.154:3000/\": dial tcp 10.217.0.154:3000: connect: connection refused" Nov 22 10:59:20 crc kubenswrapper[4938]: I1122 10:59:20.475308 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:59:20 crc kubenswrapper[4938]: I1122 10:59:20.475509 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="15e6e3f1-ae9e-4a70-8342-74d6554ec24c" containerName="kube-state-metrics" containerID="cri-o://1e006da9203224e6cd4b9b07506bbd01e2737b7e51070df4e965e9ade9faf8e2" gracePeriod=30 Nov 22 10:59:20 crc kubenswrapper[4938]: I1122 10:59:20.958487 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12962edb-6a81-4491-8ae2-85efe7102284","Type":"ContainerDied","Data":"806e9b3c1459f2976be2e86e8e13e39d6942530ce51145e93a9031cb96c9ab2b"} Nov 22 10:59:20 crc kubenswrapper[4938]: I1122 10:59:20.958491 4938 generic.go:334] "Generic (PLEG): container finished" podID="12962edb-6a81-4491-8ae2-85efe7102284" containerID="806e9b3c1459f2976be2e86e8e13e39d6942530ce51145e93a9031cb96c9ab2b" exitCode=0 Nov 22 10:59:20 crc kubenswrapper[4938]: I1122 10:59:20.960921 4938 generic.go:334] "Generic (PLEG): container finished" podID="15e6e3f1-ae9e-4a70-8342-74d6554ec24c" containerID="1e006da9203224e6cd4b9b07506bbd01e2737b7e51070df4e965e9ade9faf8e2" exitCode=2 Nov 22 10:59:20 crc kubenswrapper[4938]: I1122 10:59:20.960953 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"15e6e3f1-ae9e-4a70-8342-74d6554ec24c","Type":"ContainerDied","Data":"1e006da9203224e6cd4b9b07506bbd01e2737b7e51070df4e965e9ade9faf8e2"} Nov 22 10:59:20 crc kubenswrapper[4938]: I1122 10:59:20.963086 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-84958d7694-crwst" podUID="03728744-ec67-4880-9913-40bd5e011e60" 
containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:37518->10.217.0.158:9311: read: connection reset by peer" Nov 22 10:59:20 crc kubenswrapper[4938]: I1122 10:59:20.963098 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-84958d7694-crwst" podUID="03728744-ec67-4880-9913-40bd5e011e60" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:37516->10.217.0.158:9311: read: connection reset by peer" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.426867 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.427757 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-77b7545b4c-scc96" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.528501 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.621309 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vc555\" (UniqueName: \"kubernetes.io/projected/55be79ce-3816-4644-9e33-3762615249e3-kube-api-access-vc555\") pod \"55be79ce-3816-4644-9e33-3762615249e3\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.621359 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/55be79ce-3816-4644-9e33-3762615249e3-horizon-secret-key\") pod \"55be79ce-3816-4644-9e33-3762615249e3\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.629233 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55be79ce-3816-4644-9e33-3762615249e3-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "55be79ce-3816-4644-9e33-3762615249e3" (UID: "55be79ce-3816-4644-9e33-3762615249e3"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.629402 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55be79ce-3816-4644-9e33-3762615249e3-kube-api-access-vc555" (OuterVolumeSpecName: "kube-api-access-vc555") pod "55be79ce-3816-4644-9e33-3762615249e3" (UID: "55be79ce-3816-4644-9e33-3762615249e3"). InnerVolumeSpecName "kube-api-access-vc555". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.706657 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.728209 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55be79ce-3816-4644-9e33-3762615249e3-logs\") pod \"55be79ce-3816-4644-9e33-3762615249e3\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.728270 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/55be79ce-3816-4644-9e33-3762615249e3-config-data\") pod \"55be79ce-3816-4644-9e33-3762615249e3\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.728318 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/55be79ce-3816-4644-9e33-3762615249e3-scripts\") pod \"55be79ce-3816-4644-9e33-3762615249e3\" (UID: \"55be79ce-3816-4644-9e33-3762615249e3\") " Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.728367 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-sg-core-conf-yaml\") pod \"12962edb-6a81-4491-8ae2-85efe7102284\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.728396 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12962edb-6a81-4491-8ae2-85efe7102284-log-httpd\") pod \"12962edb-6a81-4491-8ae2-85efe7102284\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.728427 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gksx4\" (UniqueName: \"kubernetes.io/projected/12962edb-6a81-4491-8ae2-85efe7102284-kube-api-access-gksx4\") pod \"12962edb-6a81-4491-8ae2-85efe7102284\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.728460 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-combined-ca-bundle\") pod \"12962edb-6a81-4491-8ae2-85efe7102284\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.728603 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55be79ce-3816-4644-9e33-3762615249e3-logs" (OuterVolumeSpecName: "logs") pod "55be79ce-3816-4644-9e33-3762615249e3" (UID: "55be79ce-3816-4644-9e33-3762615249e3"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.729612 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vc555\" (UniqueName: \"kubernetes.io/projected/55be79ce-3816-4644-9e33-3762615249e3-kube-api-access-vc555\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.729635 4938 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/55be79ce-3816-4644-9e33-3762615249e3-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.729647 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55be79ce-3816-4644-9e33-3762615249e3-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.739094 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12962edb-6a81-4491-8ae2-85efe7102284-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "12962edb-6a81-4491-8ae2-85efe7102284" (UID: "12962edb-6a81-4491-8ae2-85efe7102284"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.751461 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12962edb-6a81-4491-8ae2-85efe7102284-kube-api-access-gksx4" (OuterVolumeSpecName: "kube-api-access-gksx4") pod "12962edb-6a81-4491-8ae2-85efe7102284" (UID: "12962edb-6a81-4491-8ae2-85efe7102284"). InnerVolumeSpecName "kube-api-access-gksx4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.768707 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55be79ce-3816-4644-9e33-3762615249e3-config-data" (OuterVolumeSpecName: "config-data") pod "55be79ce-3816-4644-9e33-3762615249e3" (UID: "55be79ce-3816-4644-9e33-3762615249e3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.777591 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "12962edb-6a81-4491-8ae2-85efe7102284" (UID: "12962edb-6a81-4491-8ae2-85efe7102284"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.798531 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55be79ce-3816-4644-9e33-3762615249e3-scripts" (OuterVolumeSpecName: "scripts") pod "55be79ce-3816-4644-9e33-3762615249e3" (UID: "55be79ce-3816-4644-9e33-3762615249e3"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.831577 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-config-data\") pod \"12962edb-6a81-4491-8ae2-85efe7102284\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.831659 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12962edb-6a81-4491-8ae2-85efe7102284-run-httpd\") pod \"12962edb-6a81-4491-8ae2-85efe7102284\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.831693 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-scripts\") pod \"12962edb-6a81-4491-8ae2-85efe7102284\" (UID: \"12962edb-6a81-4491-8ae2-85efe7102284\") " Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.832006 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12962edb-6a81-4491-8ae2-85efe7102284-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "12962edb-6a81-4491-8ae2-85efe7102284" (UID: "12962edb-6a81-4491-8ae2-85efe7102284"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.832488 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/55be79ce-3816-4644-9e33-3762615249e3-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.832518 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/55be79ce-3816-4644-9e33-3762615249e3-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.832529 4938 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.832541 4938 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12962edb-6a81-4491-8ae2-85efe7102284-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.832555 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gksx4\" (UniqueName: \"kubernetes.io/projected/12962edb-6a81-4491-8ae2-85efe7102284-kube-api-access-gksx4\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.832566 4938 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/12962edb-6a81-4491-8ae2-85efe7102284-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.834715 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-scripts" (OuterVolumeSpecName: "scripts") pod "12962edb-6a81-4491-8ae2-85efe7102284" (UID: "12962edb-6a81-4491-8ae2-85efe7102284"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.843122 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "12962edb-6a81-4491-8ae2-85efe7102284" (UID: "12962edb-6a81-4491-8ae2-85efe7102284"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.939026 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.939054 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.977028 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-config-data" (OuterVolumeSpecName: "config-data") pod "12962edb-6a81-4491-8ae2-85efe7102284" (UID: "12962edb-6a81-4491-8ae2-85efe7102284"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.987011 4938 generic.go:334] "Generic (PLEG): container finished" podID="03728744-ec67-4880-9913-40bd5e011e60" containerID="ca1d6435f4c16f63e13af258476bbcd307e59a65ef20abff44aec9b14117ef8a" exitCode=0 Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.987156 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84958d7694-crwst" event={"ID":"03728744-ec67-4880-9913-40bd5e011e60","Type":"ContainerDied","Data":"ca1d6435f4c16f63e13af258476bbcd307e59a65ef20abff44aec9b14117ef8a"} Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.996729 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84db767bd9-qw4kz" Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.997858 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84db767bd9-qw4kz" event={"ID":"55be79ce-3816-4644-9e33-3762615249e3","Type":"ContainerDied","Data":"7d246379168f85ddf55c6d664d0c8f0547674b53b4cd82b40dcda7e50fd3a0bc"} Nov 22 10:59:21 crc kubenswrapper[4938]: I1122 10:59:21.997932 4938 scope.go:117] "RemoveContainer" containerID="bb99bce6487ce815a7c8d32fd09b41dee4ef133b654c9b9497375a1cd73f1a6b" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.014999 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"12962edb-6a81-4491-8ae2-85efe7102284","Type":"ContainerDied","Data":"acb3b45e3be4e59fbb88244e35b2042c5652e40a5e2dd96fbcb5e29e06a71305"} Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.015273 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.027532 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.9439178259999998 podStartE2EDuration="18.027515448s" podCreationTimestamp="2025-11-22 10:59:04 +0000 UTC" firstStartedPulling="2025-11-22 10:59:05.568015779 +0000 UTC m=+1278.035853168" lastFinishedPulling="2025-11-22 10:59:21.651613391 +0000 UTC m=+1294.119450790" observedRunningTime="2025-11-22 10:59:22.02719764 +0000 UTC m=+1294.495035039" watchObservedRunningTime="2025-11-22 10:59:22.027515448 +0000 UTC m=+1294.495352847" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.043135 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12962edb-6a81-4491-8ae2-85efe7102284-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.062197 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.082519 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-84db767bd9-qw4kz"] Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.088059 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.108948 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-84db767bd9-qw4kz"] Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.116977 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.159670 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.180430 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:22 crc kubenswrapper[4938]: E1122 10:59:22.180850 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="proxy-httpd" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.180868 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="proxy-httpd" Nov 22 10:59:22 crc kubenswrapper[4938]: E1122 10:59:22.180880 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e6e3f1-ae9e-4a70-8342-74d6554ec24c" containerName="kube-state-metrics" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.180888 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e6e3f1-ae9e-4a70-8342-74d6554ec24c" containerName="kube-state-metrics" Nov 22 10:59:22 crc kubenswrapper[4938]: E1122 10:59:22.180934 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55be79ce-3816-4644-9e33-3762615249e3" containerName="horizon" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.180943 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="55be79ce-3816-4644-9e33-3762615249e3" containerName="horizon" Nov 22 10:59:22 crc kubenswrapper[4938]: E1122 10:59:22.180952 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03728744-ec67-4880-9913-40bd5e011e60" containerName="barbican-api" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.180958 4938 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="03728744-ec67-4880-9913-40bd5e011e60" containerName="barbican-api" Nov 22 10:59:22 crc kubenswrapper[4938]: E1122 10:59:22.180965 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="ceilometer-notification-agent" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.180971 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="ceilometer-notification-agent" Nov 22 10:59:22 crc kubenswrapper[4938]: E1122 10:59:22.180992 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03728744-ec67-4880-9913-40bd5e011e60" containerName="barbican-api-log" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.180999 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="03728744-ec67-4880-9913-40bd5e011e60" containerName="barbican-api-log" Nov 22 10:59:22 crc kubenswrapper[4938]: E1122 10:59:22.181009 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55be79ce-3816-4644-9e33-3762615249e3" containerName="horizon-log" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.181015 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="55be79ce-3816-4644-9e33-3762615249e3" containerName="horizon-log" Nov 22 10:59:22 crc kubenswrapper[4938]: E1122 10:59:22.181026 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="sg-core" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.181034 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="sg-core" Nov 22 10:59:22 crc kubenswrapper[4938]: E1122 10:59:22.181048 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="ceilometer-central-agent" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.181055 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="ceilometer-central-agent" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.181237 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="03728744-ec67-4880-9913-40bd5e011e60" containerName="barbican-api-log" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.181251 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="55be79ce-3816-4644-9e33-3762615249e3" containerName="horizon" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.181264 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="ceilometer-central-agent" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.181277 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="ceilometer-notification-agent" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.181291 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="sg-core" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.181305 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="03728744-ec67-4880-9913-40bd5e011e60" containerName="barbican-api" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.181314 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="15e6e3f1-ae9e-4a70-8342-74d6554ec24c" containerName="kube-state-metrics" Nov 22 10:59:22 crc 
kubenswrapper[4938]: I1122 10:59:22.181320 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="55be79ce-3816-4644-9e33-3762615249e3" containerName="horizon-log" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.181329 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="12962edb-6a81-4491-8ae2-85efe7102284" containerName="proxy-httpd" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.187606 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.191256 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.191485 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.217390 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.257666 4938 scope.go:117] "RemoveContainer" containerID="2d2f992d56e048a9a6b64125a55a634ae075ae2da25c37cfa892307d7c8fb3e1" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.260316 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-config-data-custom\") pod \"03728744-ec67-4880-9913-40bd5e011e60\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.260389 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-combined-ca-bundle\") pod \"03728744-ec67-4880-9913-40bd5e011e60\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.260416 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-config-data\") pod \"03728744-ec67-4880-9913-40bd5e011e60\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.260476 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rt26k\" (UniqueName: \"kubernetes.io/projected/03728744-ec67-4880-9913-40bd5e011e60-kube-api-access-rt26k\") pod \"03728744-ec67-4880-9913-40bd5e011e60\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.260529 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03728744-ec67-4880-9913-40bd5e011e60-logs\") pod \"03728744-ec67-4880-9913-40bd5e011e60\" (UID: \"03728744-ec67-4880-9913-40bd5e011e60\") " Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.260621 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kb2f2\" (UniqueName: \"kubernetes.io/projected/15e6e3f1-ae9e-4a70-8342-74d6554ec24c-kube-api-access-kb2f2\") pod \"15e6e3f1-ae9e-4a70-8342-74d6554ec24c\" (UID: \"15e6e3f1-ae9e-4a70-8342-74d6554ec24c\") " Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.260814 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.260874 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceb91968-b61e-4cfa-b939-7f475cdde24d-run-httpd\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.260973 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz9ll\" (UniqueName: \"kubernetes.io/projected/ceb91968-b61e-4cfa-b939-7f475cdde24d-kube-api-access-gz9ll\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.261025 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-scripts\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.261046 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceb91968-b61e-4cfa-b939-7f475cdde24d-log-httpd\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.261107 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-config-data\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.261131 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.261328 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03728744-ec67-4880-9913-40bd5e011e60-logs" (OuterVolumeSpecName: "logs") pod "03728744-ec67-4880-9913-40bd5e011e60" (UID: "03728744-ec67-4880-9913-40bd5e011e60"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.266134 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "03728744-ec67-4880-9913-40bd5e011e60" (UID: "03728744-ec67-4880-9913-40bd5e011e60"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.266498 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03728744-ec67-4880-9913-40bd5e011e60-kube-api-access-rt26k" (OuterVolumeSpecName: "kube-api-access-rt26k") pod "03728744-ec67-4880-9913-40bd5e011e60" (UID: "03728744-ec67-4880-9913-40bd5e011e60"). InnerVolumeSpecName "kube-api-access-rt26k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.267642 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15e6e3f1-ae9e-4a70-8342-74d6554ec24c-kube-api-access-kb2f2" (OuterVolumeSpecName: "kube-api-access-kb2f2") pod "15e6e3f1-ae9e-4a70-8342-74d6554ec24c" (UID: "15e6e3f1-ae9e-4a70-8342-74d6554ec24c"). InnerVolumeSpecName "kube-api-access-kb2f2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.275723 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.345083 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "03728744-ec67-4880-9913-40bd5e011e60" (UID: "03728744-ec67-4880-9913-40bd5e011e60"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362064 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-config-data\") pod \"278730a8-49d3-465d-a6b3-49f52848cc46\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362181 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-config-data-custom\") pod \"278730a8-49d3-465d-a6b3-49f52848cc46\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362229 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-scripts\") pod \"278730a8-49d3-465d-a6b3-49f52848cc46\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362248 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/278730a8-49d3-465d-a6b3-49f52848cc46-etc-machine-id\") pod \"278730a8-49d3-465d-a6b3-49f52848cc46\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362362 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sq4nr\" (UniqueName: \"kubernetes.io/projected/278730a8-49d3-465d-a6b3-49f52848cc46-kube-api-access-sq4nr\") pod \"278730a8-49d3-465d-a6b3-49f52848cc46\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362396 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-combined-ca-bundle\") pod \"278730a8-49d3-465d-a6b3-49f52848cc46\" (UID: \"278730a8-49d3-465d-a6b3-49f52848cc46\") " Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362630 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz9ll\" (UniqueName: \"kubernetes.io/projected/ceb91968-b61e-4cfa-b939-7f475cdde24d-kube-api-access-gz9ll\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362674 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-scripts\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362690 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceb91968-b61e-4cfa-b939-7f475cdde24d-log-httpd\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362727 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-config-data\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362743 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362775 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362818 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceb91968-b61e-4cfa-b939-7f475cdde24d-run-httpd\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362864 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362877 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rt26k\" (UniqueName: \"kubernetes.io/projected/03728744-ec67-4880-9913-40bd5e011e60-kube-api-access-rt26k\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362892 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03728744-ec67-4880-9913-40bd5e011e60-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362906 4938 reconciler_common.go:293] 
"Volume detached for volume \"kube-api-access-kb2f2\" (UniqueName: \"kubernetes.io/projected/15e6e3f1-ae9e-4a70-8342-74d6554ec24c-kube-api-access-kb2f2\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.362934 4938 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.363976 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceb91968-b61e-4cfa-b939-7f475cdde24d-run-httpd\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.368993 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceb91968-b61e-4cfa-b939-7f475cdde24d-log-httpd\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.369064 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-config-data" (OuterVolumeSpecName: "config-data") pod "03728744-ec67-4880-9913-40bd5e011e60" (UID: "03728744-ec67-4880-9913-40bd5e011e60"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.370374 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/278730a8-49d3-465d-a6b3-49f52848cc46-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "278730a8-49d3-465d-a6b3-49f52848cc46" (UID: "278730a8-49d3-465d-a6b3-49f52848cc46"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.370659 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "278730a8-49d3-465d-a6b3-49f52848cc46" (UID: "278730a8-49d3-465d-a6b3-49f52848cc46"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.374399 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/278730a8-49d3-465d-a6b3-49f52848cc46-kube-api-access-sq4nr" (OuterVolumeSpecName: "kube-api-access-sq4nr") pod "278730a8-49d3-465d-a6b3-49f52848cc46" (UID: "278730a8-49d3-465d-a6b3-49f52848cc46"). InnerVolumeSpecName "kube-api-access-sq4nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.374662 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-config-data\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.375461 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-scripts\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.378209 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.386240 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gz9ll\" (UniqueName: \"kubernetes.io/projected/ceb91968-b61e-4cfa-b939-7f475cdde24d-kube-api-access-gz9ll\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.396296 4938 scope.go:117] "RemoveContainer" containerID="4e7803c1f3f27e0b753d33402a9fcf9c26ef0b2cabbeb21a566a734c28b67556" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.399226 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-scripts" (OuterVolumeSpecName: "scripts") pod "278730a8-49d3-465d-a6b3-49f52848cc46" (UID: "278730a8-49d3-465d-a6b3-49f52848cc46"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.408453 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.465521 4938 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.465567 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.465579 4938 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/278730a8-49d3-465d-a6b3-49f52848cc46-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.465590 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03728744-ec67-4880-9913-40bd5e011e60-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.465598 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sq4nr\" (UniqueName: \"kubernetes.io/projected/278730a8-49d3-465d-a6b3-49f52848cc46-kube-api-access-sq4nr\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.466148 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12962edb-6a81-4491-8ae2-85efe7102284" path="/var/lib/kubelet/pods/12962edb-6a81-4491-8ae2-85efe7102284/volumes" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.467306 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55be79ce-3816-4644-9e33-3762615249e3" path="/var/lib/kubelet/pods/55be79ce-3816-4644-9e33-3762615249e3/volumes" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.480380 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "278730a8-49d3-465d-a6b3-49f52848cc46" (UID: "278730a8-49d3-465d-a6b3-49f52848cc46"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.505575 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-config-data" (OuterVolumeSpecName: "config-data") pod "278730a8-49d3-465d-a6b3-49f52848cc46" (UID: "278730a8-49d3-465d-a6b3-49f52848cc46"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.514372 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.567420 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.567458 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/278730a8-49d3-465d-a6b3-49f52848cc46-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.682065 4938 scope.go:117] "RemoveContainer" containerID="2c28d27fbd9e0a4b8deea8ae9420b5fd5d5b141cd99c146a17e89bbfcbcb8d97" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.722644 4938 scope.go:117] "RemoveContainer" containerID="806e9b3c1459f2976be2e86e8e13e39d6942530ce51145e93a9031cb96c9ab2b" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.757075 4938 scope.go:117] "RemoveContainer" containerID="c9ab59bc6f8122d1a745c1e14123d3595e9efcc4b08fb7eea8af7ad85061f75d" Nov 22 10:59:22 crc kubenswrapper[4938]: I1122 10:59:22.806833 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.026339 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"15e6e3f1-ae9e-4a70-8342-74d6554ec24c","Type":"ContainerDied","Data":"57ac01815fc4b156d536faf55964bd2f26226c6e71b2fd5b5171639a6b60f8b1"} Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.026377 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.026409 4938 scope.go:117] "RemoveContainer" containerID="1e006da9203224e6cd4b9b07506bbd01e2737b7e51070df4e965e9ade9faf8e2" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.037537 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"cc16df53-2254-4dc7-8914-88afcbc0b5c4","Type":"ContainerStarted","Data":"7c4377e4d04d064c3cb9ebb654ecb73d68bdae9c32fbc643f5bcc53e538c48a3"} Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.044587 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-84958d7694-crwst" event={"ID":"03728744-ec67-4880-9913-40bd5e011e60","Type":"ContainerDied","Data":"139a775eb94aa293ec0b59fec273c2d441728dc07f19fd71c9792ffd0fa090c1"} Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.044703 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-84958d7694-crwst" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.052412 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"278730a8-49d3-465d-a6b3-49f52848cc46","Type":"ContainerDied","Data":"64190bebc34fe179c5a2e37d0d68cec60bee909e7651a9c36bf781fd53f88ee5"} Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.052455 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.052631 4938 scope.go:117] "RemoveContainer" containerID="ca1d6435f4c16f63e13af258476bbcd307e59a65ef20abff44aec9b14117ef8a" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.054121 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.080869 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.105888 4938 scope.go:117] "RemoveContainer" containerID="9da44cf998ea3acc76355c2a0080d603778a5d70020ca99ff26fb7040e78cab6" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.127798 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.156705 4938 scope.go:117] "RemoveContainer" containerID="596c968b22cb20e10118ac04c47d10b22d78612800d0cab62a22fe6e3cdc8363" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.161570 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:59:23 crc kubenswrapper[4938]: E1122 10:59:23.162272 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="278730a8-49d3-465d-a6b3-49f52848cc46" containerName="probe" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.162390 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="278730a8-49d3-465d-a6b3-49f52848cc46" containerName="probe" Nov 22 10:59:23 crc kubenswrapper[4938]: E1122 10:59:23.162488 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="278730a8-49d3-465d-a6b3-49f52848cc46" containerName="cinder-scheduler" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.162569 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="278730a8-49d3-465d-a6b3-49f52848cc46" containerName="cinder-scheduler" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.162938 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="278730a8-49d3-465d-a6b3-49f52848cc46" containerName="cinder-scheduler" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.163065 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="278730a8-49d3-465d-a6b3-49f52848cc46" containerName="probe" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.163891 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.167482 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.167927 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.188336 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/190d6459-e173-4817-a60a-b204a9a4bf68-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"190d6459-e173-4817-a60a-b204a9a4bf68\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.188786 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-745j8\" (UniqueName: \"kubernetes.io/projected/190d6459-e173-4817-a60a-b204a9a4bf68-kube-api-access-745j8\") pod \"kube-state-metrics-0\" (UID: \"190d6459-e173-4817-a60a-b204a9a4bf68\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.188906 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/190d6459-e173-4817-a60a-b204a9a4bf68-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"190d6459-e173-4817-a60a-b204a9a4bf68\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.189195 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/190d6459-e173-4817-a60a-b204a9a4bf68-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"190d6459-e173-4817-a60a-b204a9a4bf68\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.190150 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-84958d7694-crwst"] Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.199313 4938 scope.go:117] "RemoveContainer" containerID="3078b352839c9f2f1963d0c8a76c468373dea2d26127078f1e5e4199a6e0074a" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.199747 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-84958d7694-crwst"] Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.210131 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.219423 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.228749 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.237511 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.239192 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.242363 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.246196 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.304055 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c5c1e05-59a2-49d5-9bbc-315dc537b994-scripts\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.304146 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c5c1e05-59a2-49d5-9bbc-315dc537b994-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.304192 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/190d6459-e173-4817-a60a-b204a9a4bf68-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"190d6459-e173-4817-a60a-b204a9a4bf68\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.304219 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-745j8\" (UniqueName: \"kubernetes.io/projected/190d6459-e173-4817-a60a-b204a9a4bf68-kube-api-access-745j8\") pod \"kube-state-metrics-0\" (UID: \"190d6459-e173-4817-a60a-b204a9a4bf68\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.304236 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/190d6459-e173-4817-a60a-b204a9a4bf68-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"190d6459-e173-4817-a60a-b204a9a4bf68\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.304295 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c5c1e05-59a2-49d5-9bbc-315dc537b994-config-data\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.304314 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c5c1e05-59a2-49d5-9bbc-315dc537b994-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.304355 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/190d6459-e173-4817-a60a-b204a9a4bf68-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"190d6459-e173-4817-a60a-b204a9a4bf68\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.304398 4938 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c5c1e05-59a2-49d5-9bbc-315dc537b994-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.304431 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krt8n\" (UniqueName: \"kubernetes.io/projected/4c5c1e05-59a2-49d5-9bbc-315dc537b994-kube-api-access-krt8n\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.311579 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/190d6459-e173-4817-a60a-b204a9a4bf68-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"190d6459-e173-4817-a60a-b204a9a4bf68\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.313661 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/190d6459-e173-4817-a60a-b204a9a4bf68-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"190d6459-e173-4817-a60a-b204a9a4bf68\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.317595 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/190d6459-e173-4817-a60a-b204a9a4bf68-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"190d6459-e173-4817-a60a-b204a9a4bf68\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.339691 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-745j8\" (UniqueName: \"kubernetes.io/projected/190d6459-e173-4817-a60a-b204a9a4bf68-kube-api-access-745j8\") pod \"kube-state-metrics-0\" (UID: \"190d6459-e173-4817-a60a-b204a9a4bf68\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.405868 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c5c1e05-59a2-49d5-9bbc-315dc537b994-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.405938 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krt8n\" (UniqueName: \"kubernetes.io/projected/4c5c1e05-59a2-49d5-9bbc-315dc537b994-kube-api-access-krt8n\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.405969 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c5c1e05-59a2-49d5-9bbc-315dc537b994-scripts\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.405987 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/4c5c1e05-59a2-49d5-9bbc-315dc537b994-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.406058 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c5c1e05-59a2-49d5-9bbc-315dc537b994-config-data\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.406077 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c5c1e05-59a2-49d5-9bbc-315dc537b994-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.406151 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c5c1e05-59a2-49d5-9bbc-315dc537b994-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.409666 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c5c1e05-59a2-49d5-9bbc-315dc537b994-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.411111 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c5c1e05-59a2-49d5-9bbc-315dc537b994-config-data\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.413638 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c5c1e05-59a2-49d5-9bbc-315dc537b994-scripts\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.414538 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c5c1e05-59a2-49d5-9bbc-315dc537b994-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.439565 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krt8n\" (UniqueName: \"kubernetes.io/projected/4c5c1e05-59a2-49d5-9bbc-315dc537b994-kube-api-access-krt8n\") pod \"cinder-scheduler-0\" (UID: \"4c5c1e05-59a2-49d5-9bbc-315dc537b994\") " pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.491491 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.565315 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4938]: I1122 10:59:23.964260 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:59:23 crc kubenswrapper[4938]: W1122 10:59:23.973010 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod190d6459_e173_4817_a60a_b204a9a4bf68.slice/crio-0fc5211739804ff1c4d1188ae8e8f7f93882679b2fcf87d57a94e4f091be352c WatchSource:0}: Error finding container 0fc5211739804ff1c4d1188ae8e8f7f93882679b2fcf87d57a94e4f091be352c: Status 404 returned error can't find the container with id 0fc5211739804ff1c4d1188ae8e8f7f93882679b2fcf87d57a94e4f091be352c Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.069592 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"190d6459-e173-4817-a60a-b204a9a4bf68","Type":"ContainerStarted","Data":"0fc5211739804ff1c4d1188ae8e8f7f93882679b2fcf87d57a94e4f091be352c"} Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.082478 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceb91968-b61e-4cfa-b939-7f475cdde24d","Type":"ContainerStarted","Data":"0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78"} Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.082534 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceb91968-b61e-4cfa-b939-7f475cdde24d","Type":"ContainerStarted","Data":"adc987f8237b108f7e80134865b4237735a251ad7363daa82c846de79ea5b7a7"} Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.097079 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.467714 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03728744-ec67-4880-9913-40bd5e011e60" path="/var/lib/kubelet/pods/03728744-ec67-4880-9913-40bd5e011e60/volumes" Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.468593 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15e6e3f1-ae9e-4a70-8342-74d6554ec24c" path="/var/lib/kubelet/pods/15e6e3f1-ae9e-4a70-8342-74d6554ec24c/volumes" Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.469330 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="278730a8-49d3-465d-a6b3-49f52848cc46" path="/var/lib/kubelet/pods/278730a8-49d3-465d-a6b3-49f52848cc46/volumes" Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.780747 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-j5hgn"] Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.781986 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-j5hgn" Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.802094 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-j5hgn"] Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.873887 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-7lllj"] Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.879532 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-7lllj" Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.901644 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-7lllj"] Nov 22 10:59:24 crc kubenswrapper[4938]: I1122 10:59:24.930331 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m826m\" (UniqueName: \"kubernetes.io/projected/dea066c0-8c10-4c73-8f35-4ad99c4fd251-kube-api-access-m826m\") pod \"nova-api-db-create-j5hgn\" (UID: \"dea066c0-8c10-4c73-8f35-4ad99c4fd251\") " pod="openstack/nova-api-db-create-j5hgn" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.032612 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-285fz\" (UniqueName: \"kubernetes.io/projected/8481f6ff-a8d4-40ea-80b0-1076f6b60c61-kube-api-access-285fz\") pod \"nova-cell0-db-create-7lllj\" (UID: \"8481f6ff-a8d4-40ea-80b0-1076f6b60c61\") " pod="openstack/nova-cell0-db-create-7lllj" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.032709 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m826m\" (UniqueName: \"kubernetes.io/projected/dea066c0-8c10-4c73-8f35-4ad99c4fd251-kube-api-access-m826m\") pod \"nova-api-db-create-j5hgn\" (UID: \"dea066c0-8c10-4c73-8f35-4ad99c4fd251\") " pod="openstack/nova-api-db-create-j5hgn" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.055890 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m826m\" (UniqueName: \"kubernetes.io/projected/dea066c0-8c10-4c73-8f35-4ad99c4fd251-kube-api-access-m826m\") pod \"nova-api-db-create-j5hgn\" (UID: \"dea066c0-8c10-4c73-8f35-4ad99c4fd251\") " pod="openstack/nova-api-db-create-j5hgn" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.079259 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-v28zg"] Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.080691 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-v28zg" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.102893 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-v28zg"] Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.104505 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-j5hgn" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.134794 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-285fz\" (UniqueName: \"kubernetes.io/projected/8481f6ff-a8d4-40ea-80b0-1076f6b60c61-kube-api-access-285fz\") pod \"nova-cell0-db-create-7lllj\" (UID: \"8481f6ff-a8d4-40ea-80b0-1076f6b60c61\") " pod="openstack/nova-cell0-db-create-7lllj" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.155681 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-285fz\" (UniqueName: \"kubernetes.io/projected/8481f6ff-a8d4-40ea-80b0-1076f6b60c61-kube-api-access-285fz\") pod \"nova-cell0-db-create-7lllj\" (UID: \"8481f6ff-a8d4-40ea-80b0-1076f6b60c61\") " pod="openstack/nova-cell0-db-create-7lllj" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.166090 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4c5c1e05-59a2-49d5-9bbc-315dc537b994","Type":"ContainerStarted","Data":"f50cba27246b71662e3f66474cac4c0e53d378421289c2b820fe2c50450c3390"} Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.166128 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4c5c1e05-59a2-49d5-9bbc-315dc537b994","Type":"ContainerStarted","Data":"63edaa743f3a52144620658aefdaa2c0b84a4e8d6b8237e27319c3fbfe225c6b"} Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.173757 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"190d6459-e173-4817-a60a-b204a9a4bf68","Type":"ContainerStarted","Data":"9d927caa21024760cc7fab54226f3dbad5f856072f3889d569e1c02bf355308f"} Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.174782 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.179481 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceb91968-b61e-4cfa-b939-7f475cdde24d","Type":"ContainerStarted","Data":"ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1"} Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.198472 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-7lllj" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.200600 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.733299033 podStartE2EDuration="2.200573415s" podCreationTimestamp="2025-11-22 10:59:23 +0000 UTC" firstStartedPulling="2025-11-22 10:59:23.975845579 +0000 UTC m=+1296.443682978" lastFinishedPulling="2025-11-22 10:59:24.443119961 +0000 UTC m=+1296.910957360" observedRunningTime="2025-11-22 10:59:25.189440559 +0000 UTC m=+1297.657277968" watchObservedRunningTime="2025-11-22 10:59:25.200573415 +0000 UTC m=+1297.668410824" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.236092 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gmq6\" (UniqueName: \"kubernetes.io/projected/a8cdfce8-5355-49f9-997d-04cac912ca12-kube-api-access-8gmq6\") pod \"nova-cell1-db-create-v28zg\" (UID: \"a8cdfce8-5355-49f9-997d-04cac912ca12\") " pod="openstack/nova-cell1-db-create-v28zg" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.338348 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gmq6\" (UniqueName: \"kubernetes.io/projected/a8cdfce8-5355-49f9-997d-04cac912ca12-kube-api-access-8gmq6\") pod \"nova-cell1-db-create-v28zg\" (UID: \"a8cdfce8-5355-49f9-997d-04cac912ca12\") " pod="openstack/nova-cell1-db-create-v28zg" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.365678 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gmq6\" (UniqueName: \"kubernetes.io/projected/a8cdfce8-5355-49f9-997d-04cac912ca12-kube-api-access-8gmq6\") pod \"nova-cell1-db-create-v28zg\" (UID: \"a8cdfce8-5355-49f9-997d-04cac912ca12\") " pod="openstack/nova-cell1-db-create-v28zg" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.402430 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-v28zg" Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.693132 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-j5hgn"] Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.864676 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-7lllj"] Nov 22 10:59:25 crc kubenswrapper[4938]: W1122 10:59:25.866203 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8481f6ff_a8d4_40ea_80b0_1076f6b60c61.slice/crio-7656f7cd35d46286cd72f6185850339be6d13cf33c181b9065d8a1d5e0b4a4b8 WatchSource:0}: Error finding container 7656f7cd35d46286cd72f6185850339be6d13cf33c181b9065d8a1d5e0b4a4b8: Status 404 returned error can't find the container with id 7656f7cd35d46286cd72f6185850339be6d13cf33c181b9065d8a1d5e0b4a4b8 Nov 22 10:59:25 crc kubenswrapper[4938]: I1122 10:59:25.931406 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-v28zg"] Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.193370 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-v28zg" event={"ID":"a8cdfce8-5355-49f9-997d-04cac912ca12","Type":"ContainerStarted","Data":"ebb74c875b26fd6b538c39832800c7a343ddb88cc06ca0ab9785e05d5e8800f4"} Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.193452 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-v28zg" event={"ID":"a8cdfce8-5355-49f9-997d-04cac912ca12","Type":"ContainerStarted","Data":"72fba1a71c67ea3bce29d1ee0c2467dcae0a2549bd4166c89a6bdc13cf4216d4"} Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.196183 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4c5c1e05-59a2-49d5-9bbc-315dc537b994","Type":"ContainerStarted","Data":"321fdf333aea639c874d92b467560b4490c9b5f4026fd0b15553d3ccbff3bfc1"} Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.199576 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7lllj" event={"ID":"8481f6ff-a8d4-40ea-80b0-1076f6b60c61","Type":"ContainerStarted","Data":"352b329b5028da50f16f1ba670b49148c2ecbbb419fab12396c5a995adf7f81d"} Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.199634 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7lllj" event={"ID":"8481f6ff-a8d4-40ea-80b0-1076f6b60c61","Type":"ContainerStarted","Data":"7656f7cd35d46286cd72f6185850339be6d13cf33c181b9065d8a1d5e0b4a4b8"} Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.201857 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceb91968-b61e-4cfa-b939-7f475cdde24d","Type":"ContainerStarted","Data":"2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547"} Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.203753 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-j5hgn" event={"ID":"dea066c0-8c10-4c73-8f35-4ad99c4fd251","Type":"ContainerStarted","Data":"05901d070c3e91e022199beccc8942e050f0e702038e1b0c3b7d37c1d300c860"} Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.203810 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-j5hgn" 
event={"ID":"dea066c0-8c10-4c73-8f35-4ad99c4fd251","Type":"ContainerStarted","Data":"e25a377e4f1c1aae437e68fbdc29bc9d2a89107c8a81cbd493541573ae5e5795"} Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.219302 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.219284265 podStartE2EDuration="3.219284265s" podCreationTimestamp="2025-11-22 10:59:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:26.216960388 +0000 UTC m=+1298.684797787" watchObservedRunningTime="2025-11-22 10:59:26.219284265 +0000 UTC m=+1298.687121664" Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.236870 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-j5hgn" podStartSLOduration=2.236855601 podStartE2EDuration="2.236855601s" podCreationTimestamp="2025-11-22 10:59:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:26.229785355 +0000 UTC m=+1298.697622774" watchObservedRunningTime="2025-11-22 10:59:26.236855601 +0000 UTC m=+1298.704693000" Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.929767 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.930321 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="db1f8068-a414-40bf-984e-2a8a44d2ce07" containerName="glance-log" containerID="cri-o://10667d768b684e8257be7d5e1e8519ba223853cbc1661fb52f4c310bd1e3f4d8" gracePeriod=30 Nov 22 10:59:26 crc kubenswrapper[4938]: I1122 10:59:26.930748 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="db1f8068-a414-40bf-984e-2a8a44d2ce07" containerName="glance-httpd" containerID="cri-o://8591f8da93d80894ea2b01c1c54cfefd76185e5f99720f6642c9d78890600653" gracePeriod=30 Nov 22 10:59:27 crc kubenswrapper[4938]: I1122 10:59:27.219717 4938 generic.go:334] "Generic (PLEG): container finished" podID="dea066c0-8c10-4c73-8f35-4ad99c4fd251" containerID="05901d070c3e91e022199beccc8942e050f0e702038e1b0c3b7d37c1d300c860" exitCode=0 Nov 22 10:59:27 crc kubenswrapper[4938]: I1122 10:59:27.221041 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-j5hgn" event={"ID":"dea066c0-8c10-4c73-8f35-4ad99c4fd251","Type":"ContainerDied","Data":"05901d070c3e91e022199beccc8942e050f0e702038e1b0c3b7d37c1d300c860"} Nov 22 10:59:27 crc kubenswrapper[4938]: I1122 10:59:27.231375 4938 generic.go:334] "Generic (PLEG): container finished" podID="db1f8068-a414-40bf-984e-2a8a44d2ce07" containerID="10667d768b684e8257be7d5e1e8519ba223853cbc1661fb52f4c310bd1e3f4d8" exitCode=143 Nov 22 10:59:27 crc kubenswrapper[4938]: I1122 10:59:27.231728 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"db1f8068-a414-40bf-984e-2a8a44d2ce07","Type":"ContainerDied","Data":"10667d768b684e8257be7d5e1e8519ba223853cbc1661fb52f4c310bd1e3f4d8"} Nov 22 10:59:27 crc kubenswrapper[4938]: I1122 10:59:27.241434 4938 generic.go:334] "Generic (PLEG): container finished" podID="a8cdfce8-5355-49f9-997d-04cac912ca12" containerID="ebb74c875b26fd6b538c39832800c7a343ddb88cc06ca0ab9785e05d5e8800f4" exitCode=0 
Nov 22 10:59:27 crc kubenswrapper[4938]: I1122 10:59:27.241951 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-v28zg" event={"ID":"a8cdfce8-5355-49f9-997d-04cac912ca12","Type":"ContainerDied","Data":"ebb74c875b26fd6b538c39832800c7a343ddb88cc06ca0ab9785e05d5e8800f4"} Nov 22 10:59:27 crc kubenswrapper[4938]: I1122 10:59:27.245319 4938 generic.go:334] "Generic (PLEG): container finished" podID="8481f6ff-a8d4-40ea-80b0-1076f6b60c61" containerID="352b329b5028da50f16f1ba670b49148c2ecbbb419fab12396c5a995adf7f81d" exitCode=0 Nov 22 10:59:27 crc kubenswrapper[4938]: I1122 10:59:27.246064 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7lllj" event={"ID":"8481f6ff-a8d4-40ea-80b0-1076f6b60c61","Type":"ContainerDied","Data":"352b329b5028da50f16f1ba670b49148c2ecbbb419fab12396c5a995adf7f81d"} Nov 22 10:59:27 crc kubenswrapper[4938]: I1122 10:59:27.858481 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:59:27 crc kubenswrapper[4938]: I1122 10:59:27.859039 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="d29dc9e2-9a34-4127-95ec-100c5483b53c" containerName="glance-log" containerID="cri-o://ddf5625e8f5b75203a221aaefdde98986607e3019b1c39cfec3f0949198e39a6" gracePeriod=30 Nov 22 10:59:27 crc kubenswrapper[4938]: I1122 10:59:27.859153 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="d29dc9e2-9a34-4127-95ec-100c5483b53c" containerName="glance-httpd" containerID="cri-o://fb76004ce1761d61653c70811b5cfea9222d2c30584753513dcca537b1acfe18" gracePeriod=30 Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.255119 4938 generic.go:334] "Generic (PLEG): container finished" podID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerID="0dac6bc0d6b6f464822dfdeb7239b55b93fadd83f64b5302767d4b449508a8c5" exitCode=1 Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.255190 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceb91968-b61e-4cfa-b939-7f475cdde24d","Type":"ContainerDied","Data":"0dac6bc0d6b6f464822dfdeb7239b55b93fadd83f64b5302767d4b449508a8c5"} Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.255361 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="ceilometer-central-agent" containerID="cri-o://0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78" gracePeriod=30 Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.255715 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="sg-core" containerID="cri-o://2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547" gracePeriod=30 Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.255730 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="ceilometer-notification-agent" containerID="cri-o://ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1" gracePeriod=30 Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.261079 4938 generic.go:334] "Generic (PLEG): container finished" podID="d29dc9e2-9a34-4127-95ec-100c5483b53c" 
containerID="ddf5625e8f5b75203a221aaefdde98986607e3019b1c39cfec3f0949198e39a6" exitCode=143 Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.261192 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d29dc9e2-9a34-4127-95ec-100c5483b53c","Type":"ContainerDied","Data":"ddf5625e8f5b75203a221aaefdde98986607e3019b1c39cfec3f0949198e39a6"} Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.565522 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.782117 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-j5hgn" Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.793633 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-7lllj" Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.804145 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-v28zg" Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.925420 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-285fz\" (UniqueName: \"kubernetes.io/projected/8481f6ff-a8d4-40ea-80b0-1076f6b60c61-kube-api-access-285fz\") pod \"8481f6ff-a8d4-40ea-80b0-1076f6b60c61\" (UID: \"8481f6ff-a8d4-40ea-80b0-1076f6b60c61\") " Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.925570 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m826m\" (UniqueName: \"kubernetes.io/projected/dea066c0-8c10-4c73-8f35-4ad99c4fd251-kube-api-access-m826m\") pod \"dea066c0-8c10-4c73-8f35-4ad99c4fd251\" (UID: \"dea066c0-8c10-4c73-8f35-4ad99c4fd251\") " Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.925736 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gmq6\" (UniqueName: \"kubernetes.io/projected/a8cdfce8-5355-49f9-997d-04cac912ca12-kube-api-access-8gmq6\") pod \"a8cdfce8-5355-49f9-997d-04cac912ca12\" (UID: \"a8cdfce8-5355-49f9-997d-04cac912ca12\") " Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.932220 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dea066c0-8c10-4c73-8f35-4ad99c4fd251-kube-api-access-m826m" (OuterVolumeSpecName: "kube-api-access-m826m") pod "dea066c0-8c10-4c73-8f35-4ad99c4fd251" (UID: "dea066c0-8c10-4c73-8f35-4ad99c4fd251"). InnerVolumeSpecName "kube-api-access-m826m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.932351 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8cdfce8-5355-49f9-997d-04cac912ca12-kube-api-access-8gmq6" (OuterVolumeSpecName: "kube-api-access-8gmq6") pod "a8cdfce8-5355-49f9-997d-04cac912ca12" (UID: "a8cdfce8-5355-49f9-997d-04cac912ca12"). InnerVolumeSpecName "kube-api-access-8gmq6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:28 crc kubenswrapper[4938]: I1122 10:59:28.932845 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8481f6ff-a8d4-40ea-80b0-1076f6b60c61-kube-api-access-285fz" (OuterVolumeSpecName: "kube-api-access-285fz") pod "8481f6ff-a8d4-40ea-80b0-1076f6b60c61" (UID: "8481f6ff-a8d4-40ea-80b0-1076f6b60c61"). 
InnerVolumeSpecName "kube-api-access-285fz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.027444 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gmq6\" (UniqueName: \"kubernetes.io/projected/a8cdfce8-5355-49f9-997d-04cac912ca12-kube-api-access-8gmq6\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.027714 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-285fz\" (UniqueName: \"kubernetes.io/projected/8481f6ff-a8d4-40ea-80b0-1076f6b60c61-kube-api-access-285fz\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.027724 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m826m\" (UniqueName: \"kubernetes.io/projected/dea066c0-8c10-4c73-8f35-4ad99c4fd251-kube-api-access-m826m\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.203375 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-798b4b7d-4sxkg" Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.289768 4938 generic.go:334] "Generic (PLEG): container finished" podID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerID="2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547" exitCode=2 Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.289815 4938 generic.go:334] "Generic (PLEG): container finished" podID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerID="ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1" exitCode=0 Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.289849 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceb91968-b61e-4cfa-b939-7f475cdde24d","Type":"ContainerDied","Data":"2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547"} Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.289963 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceb91968-b61e-4cfa-b939-7f475cdde24d","Type":"ContainerDied","Data":"ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1"} Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.294390 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-j5hgn" event={"ID":"dea066c0-8c10-4c73-8f35-4ad99c4fd251","Type":"ContainerDied","Data":"e25a377e4f1c1aae437e68fbdc29bc9d2a89107c8a81cbd493541573ae5e5795"} Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.294436 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e25a377e4f1c1aae437e68fbdc29bc9d2a89107c8a81cbd493541573ae5e5795" Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.294575 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-j5hgn" Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.299074 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-v28zg" event={"ID":"a8cdfce8-5355-49f9-997d-04cac912ca12","Type":"ContainerDied","Data":"72fba1a71c67ea3bce29d1ee0c2467dcae0a2549bd4166c89a6bdc13cf4216d4"} Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.299143 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72fba1a71c67ea3bce29d1ee0c2467dcae0a2549bd4166c89a6bdc13cf4216d4" Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.299224 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-v28zg" Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.314611 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7lllj" event={"ID":"8481f6ff-a8d4-40ea-80b0-1076f6b60c61","Type":"ContainerDied","Data":"7656f7cd35d46286cd72f6185850339be6d13cf33c181b9065d8a1d5e0b4a4b8"} Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.314663 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7656f7cd35d46286cd72f6185850339be6d13cf33c181b9065d8a1d5e0b4a4b8" Nov 22 10:59:29 crc kubenswrapper[4938]: I1122 10:59:29.314757 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-7lllj" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.049570 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.100793 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="db1f8068-a414-40bf-984e-2a8a44d2ce07" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.153:9292/healthcheck\": read tcp 10.217.0.2:33522->10.217.0.153:9292: read: connection reset by peer" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.100852 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="db1f8068-a414-40bf-984e-2a8a44d2ce07" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.153:9292/healthcheck\": read tcp 10.217.0.2:33524->10.217.0.153:9292: read: connection reset by peer" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.150638 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gz9ll\" (UniqueName: \"kubernetes.io/projected/ceb91968-b61e-4cfa-b939-7f475cdde24d-kube-api-access-gz9ll\") pod \"ceb91968-b61e-4cfa-b939-7f475cdde24d\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.150716 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-config-data\") pod \"ceb91968-b61e-4cfa-b939-7f475cdde24d\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.150797 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-combined-ca-bundle\") pod \"ceb91968-b61e-4cfa-b939-7f475cdde24d\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " Nov 22 10:59:30 crc 
kubenswrapper[4938]: I1122 10:59:30.150826 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-sg-core-conf-yaml\") pod \"ceb91968-b61e-4cfa-b939-7f475cdde24d\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.150866 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceb91968-b61e-4cfa-b939-7f475cdde24d-log-httpd\") pod \"ceb91968-b61e-4cfa-b939-7f475cdde24d\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.150929 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-scripts\") pod \"ceb91968-b61e-4cfa-b939-7f475cdde24d\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.150990 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceb91968-b61e-4cfa-b939-7f475cdde24d-run-httpd\") pod \"ceb91968-b61e-4cfa-b939-7f475cdde24d\" (UID: \"ceb91968-b61e-4cfa-b939-7f475cdde24d\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.151900 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ceb91968-b61e-4cfa-b939-7f475cdde24d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ceb91968-b61e-4cfa-b939-7f475cdde24d" (UID: "ceb91968-b61e-4cfa-b939-7f475cdde24d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.152140 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ceb91968-b61e-4cfa-b939-7f475cdde24d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ceb91968-b61e-4cfa-b939-7f475cdde24d" (UID: "ceb91968-b61e-4cfa-b939-7f475cdde24d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.156246 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ceb91968-b61e-4cfa-b939-7f475cdde24d-kube-api-access-gz9ll" (OuterVolumeSpecName: "kube-api-access-gz9ll") pod "ceb91968-b61e-4cfa-b939-7f475cdde24d" (UID: "ceb91968-b61e-4cfa-b939-7f475cdde24d"). InnerVolumeSpecName "kube-api-access-gz9ll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.156754 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-scripts" (OuterVolumeSpecName: "scripts") pod "ceb91968-b61e-4cfa-b939-7f475cdde24d" (UID: "ceb91968-b61e-4cfa-b939-7f475cdde24d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.181046 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ceb91968-b61e-4cfa-b939-7f475cdde24d" (UID: "ceb91968-b61e-4cfa-b939-7f475cdde24d"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.237833 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ceb91968-b61e-4cfa-b939-7f475cdde24d" (UID: "ceb91968-b61e-4cfa-b939-7f475cdde24d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.253891 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gz9ll\" (UniqueName: \"kubernetes.io/projected/ceb91968-b61e-4cfa-b939-7f475cdde24d-kube-api-access-gz9ll\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.253938 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.253947 4938 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.253959 4938 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceb91968-b61e-4cfa-b939-7f475cdde24d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.253967 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.253976 4938 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ceb91968-b61e-4cfa-b939-7f475cdde24d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.269414 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-config-data" (OuterVolumeSpecName: "config-data") pod "ceb91968-b61e-4cfa-b939-7f475cdde24d" (UID: "ceb91968-b61e-4cfa-b939-7f475cdde24d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.324870 4938 generic.go:334] "Generic (PLEG): container finished" podID="db1f8068-a414-40bf-984e-2a8a44d2ce07" containerID="8591f8da93d80894ea2b01c1c54cfefd76185e5f99720f6642c9d78890600653" exitCode=0 Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.324941 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"db1f8068-a414-40bf-984e-2a8a44d2ce07","Type":"ContainerDied","Data":"8591f8da93d80894ea2b01c1c54cfefd76185e5f99720f6642c9d78890600653"} Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.327332 4938 generic.go:334] "Generic (PLEG): container finished" podID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerID="0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78" exitCode=0 Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.327373 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceb91968-b61e-4cfa-b939-7f475cdde24d","Type":"ContainerDied","Data":"0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78"} Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.327406 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ceb91968-b61e-4cfa-b939-7f475cdde24d","Type":"ContainerDied","Data":"adc987f8237b108f7e80134865b4237735a251ad7363daa82c846de79ea5b7a7"} Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.327431 4938 scope.go:117] "RemoveContainer" containerID="0dac6bc0d6b6f464822dfdeb7239b55b93fadd83f64b5302767d4b449508a8c5" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.327438 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.353546 4938 scope.go:117] "RemoveContainer" containerID="2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.368175 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ceb91968-b61e-4cfa-b939-7f475cdde24d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.369388 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.382679 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.386141 4938 scope.go:117] "RemoveContainer" containerID="ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.402313 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:30 crc kubenswrapper[4938]: E1122 10:59:30.402736 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8cdfce8-5355-49f9-997d-04cac912ca12" containerName="mariadb-database-create" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.402758 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8cdfce8-5355-49f9-997d-04cac912ca12" containerName="mariadb-database-create" Nov 22 10:59:30 crc kubenswrapper[4938]: E1122 10:59:30.402781 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="sg-core" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.402789 
4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="sg-core" Nov 22 10:59:30 crc kubenswrapper[4938]: E1122 10:59:30.402797 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="proxy-httpd" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.402804 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="proxy-httpd" Nov 22 10:59:30 crc kubenswrapper[4938]: E1122 10:59:30.402820 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8481f6ff-a8d4-40ea-80b0-1076f6b60c61" containerName="mariadb-database-create" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.402826 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8481f6ff-a8d4-40ea-80b0-1076f6b60c61" containerName="mariadb-database-create" Nov 22 10:59:30 crc kubenswrapper[4938]: E1122 10:59:30.402843 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="ceilometer-notification-agent" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.402849 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="ceilometer-notification-agent" Nov 22 10:59:30 crc kubenswrapper[4938]: E1122 10:59:30.402862 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dea066c0-8c10-4c73-8f35-4ad99c4fd251" containerName="mariadb-database-create" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.402867 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="dea066c0-8c10-4c73-8f35-4ad99c4fd251" containerName="mariadb-database-create" Nov 22 10:59:30 crc kubenswrapper[4938]: E1122 10:59:30.402892 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="ceilometer-central-agent" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.402900 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="ceilometer-central-agent" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.403131 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8cdfce8-5355-49f9-997d-04cac912ca12" containerName="mariadb-database-create" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.403152 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="proxy-httpd" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.403166 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="ceilometer-central-agent" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.403184 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="dea066c0-8c10-4c73-8f35-4ad99c4fd251" containerName="mariadb-database-create" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.403201 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8481f6ff-a8d4-40ea-80b0-1076f6b60c61" containerName="mariadb-database-create" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.403212 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="sg-core" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.403224 4938 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" containerName="ceilometer-notification-agent" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.404965 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.407547 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.407750 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.407905 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.423314 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.424977 4938 scope.go:117] "RemoveContainer" containerID="0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.446184 4938 scope.go:117] "RemoveContainer" containerID="0dac6bc0d6b6f464822dfdeb7239b55b93fadd83f64b5302767d4b449508a8c5" Nov 22 10:59:30 crc kubenswrapper[4938]: E1122 10:59:30.452363 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dac6bc0d6b6f464822dfdeb7239b55b93fadd83f64b5302767d4b449508a8c5\": container with ID starting with 0dac6bc0d6b6f464822dfdeb7239b55b93fadd83f64b5302767d4b449508a8c5 not found: ID does not exist" containerID="0dac6bc0d6b6f464822dfdeb7239b55b93fadd83f64b5302767d4b449508a8c5" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.452405 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dac6bc0d6b6f464822dfdeb7239b55b93fadd83f64b5302767d4b449508a8c5"} err="failed to get container status \"0dac6bc0d6b6f464822dfdeb7239b55b93fadd83f64b5302767d4b449508a8c5\": rpc error: code = NotFound desc = could not find container \"0dac6bc0d6b6f464822dfdeb7239b55b93fadd83f64b5302767d4b449508a8c5\": container with ID starting with 0dac6bc0d6b6f464822dfdeb7239b55b93fadd83f64b5302767d4b449508a8c5 not found: ID does not exist" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.452428 4938 scope.go:117] "RemoveContainer" containerID="2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547" Nov 22 10:59:30 crc kubenswrapper[4938]: E1122 10:59:30.453971 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547\": container with ID starting with 2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547 not found: ID does not exist" containerID="2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.453992 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547"} err="failed to get container status \"2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547\": rpc error: code = NotFound desc = could not find container \"2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547\": container with ID starting with 2060fc96b85adc8da9f97f0f91f989e7c5fed8e28dd2e1097b8610a4a3ef4547 not found: ID does 
not exist" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.454006 4938 scope.go:117] "RemoveContainer" containerID="ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1" Nov 22 10:59:30 crc kubenswrapper[4938]: E1122 10:59:30.456194 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1\": container with ID starting with ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1 not found: ID does not exist" containerID="ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.456221 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1"} err="failed to get container status \"ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1\": rpc error: code = NotFound desc = could not find container \"ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1\": container with ID starting with ffbb0889372a31be935b64ae1e256368779d55418c92c2774cd0cac98d1c65b1 not found: ID does not exist" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.456382 4938 scope.go:117] "RemoveContainer" containerID="0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78" Nov 22 10:59:30 crc kubenswrapper[4938]: E1122 10:59:30.457874 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78\": container with ID starting with 0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78 not found: ID does not exist" containerID="0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.457925 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78"} err="failed to get container status \"0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78\": rpc error: code = NotFound desc = could not find container \"0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78\": container with ID starting with 0f1c859679b8d59fc7ef5528f2d3eaab3f751a012e1516244be8aaafd7b37e78 not found: ID does not exist" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.458318 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ceb91968-b61e-4cfa-b939-7f475cdde24d" path="/var/lib/kubelet/pods/ceb91968-b61e-4cfa-b939-7f475cdde24d/volumes" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.571533 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-config-data\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.571580 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.571609 4938 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.571633 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-scripts\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.571682 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.571727 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/671bac44-f424-43fe-b02d-2b148f0262aa-run-httpd\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.571756 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7765l\" (UniqueName: \"kubernetes.io/projected/671bac44-f424-43fe-b02d-2b148f0262aa-kube-api-access-7765l\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.571879 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/671bac44-f424-43fe-b02d-2b148f0262aa-log-httpd\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.594854 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.672996 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-combined-ca-bundle\") pod \"db1f8068-a414-40bf-984e-2a8a44d2ce07\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.673062 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dkhd\" (UniqueName: \"kubernetes.io/projected/db1f8068-a414-40bf-984e-2a8a44d2ce07-kube-api-access-6dkhd\") pod \"db1f8068-a414-40bf-984e-2a8a44d2ce07\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.673122 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-scripts\") pod \"db1f8068-a414-40bf-984e-2a8a44d2ce07\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.673214 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-public-tls-certs\") pod \"db1f8068-a414-40bf-984e-2a8a44d2ce07\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.673319 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-config-data\") pod \"db1f8068-a414-40bf-984e-2a8a44d2ce07\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.673376 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db1f8068-a414-40bf-984e-2a8a44d2ce07-logs\") pod \"db1f8068-a414-40bf-984e-2a8a44d2ce07\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.673402 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/db1f8068-a414-40bf-984e-2a8a44d2ce07-httpd-run\") pod \"db1f8068-a414-40bf-984e-2a8a44d2ce07\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.673432 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"db1f8068-a414-40bf-984e-2a8a44d2ce07\" (UID: \"db1f8068-a414-40bf-984e-2a8a44d2ce07\") " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.673782 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/671bac44-f424-43fe-b02d-2b148f0262aa-log-httpd\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.673849 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-config-data\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc 
kubenswrapper[4938]: I1122 10:59:30.673870 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.673893 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.673934 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-scripts\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.673985 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.674020 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/671bac44-f424-43fe-b02d-2b148f0262aa-run-httpd\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.674053 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7765l\" (UniqueName: \"kubernetes.io/projected/671bac44-f424-43fe-b02d-2b148f0262aa-kube-api-access-7765l\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.674180 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db1f8068-a414-40bf-984e-2a8a44d2ce07-logs" (OuterVolumeSpecName: "logs") pod "db1f8068-a414-40bf-984e-2a8a44d2ce07" (UID: "db1f8068-a414-40bf-984e-2a8a44d2ce07"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.675305 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db1f8068-a414-40bf-984e-2a8a44d2ce07-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "db1f8068-a414-40bf-984e-2a8a44d2ce07" (UID: "db1f8068-a414-40bf-984e-2a8a44d2ce07"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.675653 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/671bac44-f424-43fe-b02d-2b148f0262aa-log-httpd\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.675866 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/671bac44-f424-43fe-b02d-2b148f0262aa-run-httpd\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.679002 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-scripts\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.679173 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "db1f8068-a414-40bf-984e-2a8a44d2ce07" (UID: "db1f8068-a414-40bf-984e-2a8a44d2ce07"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.679689 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.681564 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-scripts" (OuterVolumeSpecName: "scripts") pod "db1f8068-a414-40bf-984e-2a8a44d2ce07" (UID: "db1f8068-a414-40bf-984e-2a8a44d2ce07"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.683732 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-config-data\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.684608 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.687305 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.690104 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db1f8068-a414-40bf-984e-2a8a44d2ce07-kube-api-access-6dkhd" (OuterVolumeSpecName: "kube-api-access-6dkhd") pod "db1f8068-a414-40bf-984e-2a8a44d2ce07" (UID: "db1f8068-a414-40bf-984e-2a8a44d2ce07"). InnerVolumeSpecName "kube-api-access-6dkhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.693435 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7765l\" (UniqueName: \"kubernetes.io/projected/671bac44-f424-43fe-b02d-2b148f0262aa-kube-api-access-7765l\") pod \"ceilometer-0\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.710484 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "db1f8068-a414-40bf-984e-2a8a44d2ce07" (UID: "db1f8068-a414-40bf-984e-2a8a44d2ce07"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.726603 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.732759 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "db1f8068-a414-40bf-984e-2a8a44d2ce07" (UID: "db1f8068-a414-40bf-984e-2a8a44d2ce07"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.746755 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-config-data" (OuterVolumeSpecName: "config-data") pod "db1f8068-a414-40bf-984e-2a8a44d2ce07" (UID: "db1f8068-a414-40bf-984e-2a8a44d2ce07"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.776574 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.776607 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dkhd\" (UniqueName: \"kubernetes.io/projected/db1f8068-a414-40bf-984e-2a8a44d2ce07-kube-api-access-6dkhd\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.776617 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.776625 4938 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.776633 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db1f8068-a414-40bf-984e-2a8a44d2ce07-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.776642 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/db1f8068-a414-40bf-984e-2a8a44d2ce07-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.776650 4938 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/db1f8068-a414-40bf-984e-2a8a44d2ce07-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.776686 4938 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.798571 4938 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 22 10:59:30 crc kubenswrapper[4938]: I1122 10:59:30.878875 4938 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.182895 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.341875 4938 generic.go:334] "Generic (PLEG): container finished" podID="d29dc9e2-9a34-4127-95ec-100c5483b53c" containerID="fb76004ce1761d61653c70811b5cfea9222d2c30584753513dcca537b1acfe18" exitCode=0 Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.341952 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d29dc9e2-9a34-4127-95ec-100c5483b53c","Type":"ContainerDied","Data":"fb76004ce1761d61653c70811b5cfea9222d2c30584753513dcca537b1acfe18"} Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.342800 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"671bac44-f424-43fe-b02d-2b148f0262aa","Type":"ContainerStarted","Data":"522e30d2a37155ee937e79543e0e8d1999a0ce21dd65044e0fb4f8aec7e6cf1a"} Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.345737 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"db1f8068-a414-40bf-984e-2a8a44d2ce07","Type":"ContainerDied","Data":"1d427597fa84eeedaf8135626ff3ebde2109935aa0724af484b34afdd092ee3d"} Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.345798 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.345803 4938 scope.go:117] "RemoveContainer" containerID="8591f8da93d80894ea2b01c1c54cfefd76185e5f99720f6642c9d78890600653" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.378429 4938 scope.go:117] "RemoveContainer" containerID="10667d768b684e8257be7d5e1e8519ba223853cbc1661fb52f4c310bd1e3f4d8" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.404192 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.426099 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.439000 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:59:31 crc kubenswrapper[4938]: E1122 10:59:31.440223 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db1f8068-a414-40bf-984e-2a8a44d2ce07" containerName="glance-httpd" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.440252 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="db1f8068-a414-40bf-984e-2a8a44d2ce07" containerName="glance-httpd" Nov 22 10:59:31 crc kubenswrapper[4938]: E1122 10:59:31.446667 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db1f8068-a414-40bf-984e-2a8a44d2ce07" containerName="glance-log" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.446742 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="db1f8068-a414-40bf-984e-2a8a44d2ce07" containerName="glance-log" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.450059 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="db1f8068-a414-40bf-984e-2a8a44d2ce07" containerName="glance-httpd" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.450098 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="db1f8068-a414-40bf-984e-2a8a44d2ce07" containerName="glance-log" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.457784 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.465514 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.465662 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.486259 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.526029 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.599880 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9mxr\" (UniqueName: \"kubernetes.io/projected/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-kube-api-access-c9mxr\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.600198 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.600400 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-config-data\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.601844 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.602036 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.602601 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.602761 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-logs\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.602985 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-scripts\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.704794 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-combined-ca-bundle\") 
pod \"d29dc9e2-9a34-4127-95ec-100c5483b53c\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.704868 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-internal-tls-certs\") pod \"d29dc9e2-9a34-4127-95ec-100c5483b53c\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.704961 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-scripts\") pod \"d29dc9e2-9a34-4127-95ec-100c5483b53c\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.705177 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-config-data\") pod \"d29dc9e2-9a34-4127-95ec-100c5483b53c\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.705236 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5s9sk\" (UniqueName: \"kubernetes.io/projected/d29dc9e2-9a34-4127-95ec-100c5483b53c-kube-api-access-5s9sk\") pod \"d29dc9e2-9a34-4127-95ec-100c5483b53c\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.705362 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"d29dc9e2-9a34-4127-95ec-100c5483b53c\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.705431 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d29dc9e2-9a34-4127-95ec-100c5483b53c-httpd-run\") pod \"d29dc9e2-9a34-4127-95ec-100c5483b53c\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.705453 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d29dc9e2-9a34-4127-95ec-100c5483b53c-logs\") pod \"d29dc9e2-9a34-4127-95ec-100c5483b53c\" (UID: \"d29dc9e2-9a34-4127-95ec-100c5483b53c\") " Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.705855 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.705964 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.706018 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-logs\") pod \"glance-default-external-api-0\" (UID: 
\"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.706074 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-scripts\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.706102 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9mxr\" (UniqueName: \"kubernetes.io/projected/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-kube-api-access-c9mxr\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.706142 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.706178 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-config-data\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.706240 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.706456 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.707389 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d29dc9e2-9a34-4127-95ec-100c5483b53c-logs" (OuterVolumeSpecName: "logs") pod "d29dc9e2-9a34-4127-95ec-100c5483b53c" (UID: "d29dc9e2-9a34-4127-95ec-100c5483b53c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.707571 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d29dc9e2-9a34-4127-95ec-100c5483b53c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d29dc9e2-9a34-4127-95ec-100c5483b53c" (UID: "d29dc9e2-9a34-4127-95ec-100c5483b53c"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.708280 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.708778 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-logs\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.711223 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-scripts" (OuterVolumeSpecName: "scripts") pod "d29dc9e2-9a34-4127-95ec-100c5483b53c" (UID: "d29dc9e2-9a34-4127-95ec-100c5483b53c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.711932 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.712182 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d29dc9e2-9a34-4127-95ec-100c5483b53c-kube-api-access-5s9sk" (OuterVolumeSpecName: "kube-api-access-5s9sk") pod "d29dc9e2-9a34-4127-95ec-100c5483b53c" (UID: "d29dc9e2-9a34-4127-95ec-100c5483b53c"). InnerVolumeSpecName "kube-api-access-5s9sk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.716215 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-config-data\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.716289 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-scripts\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.716870 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "d29dc9e2-9a34-4127-95ec-100c5483b53c" (UID: "d29dc9e2-9a34-4127-95ec-100c5483b53c"). InnerVolumeSpecName "local-storage03-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.724767 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.733274 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9mxr\" (UniqueName: \"kubernetes.io/projected/daf50ab9-a17b-4d53-a2f5-a1f11ed8455e-kube-api-access-c9mxr\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.755885 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e\") " pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.759592 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d29dc9e2-9a34-4127-95ec-100c5483b53c" (UID: "d29dc9e2-9a34-4127-95ec-100c5483b53c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.784172 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d29dc9e2-9a34-4127-95ec-100c5483b53c" (UID: "d29dc9e2-9a34-4127-95ec-100c5483b53c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.796377 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-config-data" (OuterVolumeSpecName: "config-data") pod "d29dc9e2-9a34-4127-95ec-100c5483b53c" (UID: "d29dc9e2-9a34-4127-95ec-100c5483b53c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.807559 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5s9sk\" (UniqueName: \"kubernetes.io/projected/d29dc9e2-9a34-4127-95ec-100c5483b53c-kube-api-access-5s9sk\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.807618 4938 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.807630 4938 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d29dc9e2-9a34-4127-95ec-100c5483b53c-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.807639 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d29dc9e2-9a34-4127-95ec-100c5483b53c-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.807648 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.807655 4938 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.807663 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.807672 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d29dc9e2-9a34-4127-95ec-100c5483b53c-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.833564 4938 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.838699 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:59:31 crc kubenswrapper[4938]: I1122 10:59:31.909336 4938 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.051037 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.217776 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/134cb80c-67f8-45ed-b602-68bce2f35109-etc-machine-id\") pod \"134cb80c-67f8-45ed-b602-68bce2f35109\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.217854 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-scripts\") pod \"134cb80c-67f8-45ed-b602-68bce2f35109\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.217988 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/134cb80c-67f8-45ed-b602-68bce2f35109-logs\") pod \"134cb80c-67f8-45ed-b602-68bce2f35109\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.218018 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/134cb80c-67f8-45ed-b602-68bce2f35109-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "134cb80c-67f8-45ed-b602-68bce2f35109" (UID: "134cb80c-67f8-45ed-b602-68bce2f35109"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.218056 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mn2nj\" (UniqueName: \"kubernetes.io/projected/134cb80c-67f8-45ed-b602-68bce2f35109-kube-api-access-mn2nj\") pod \"134cb80c-67f8-45ed-b602-68bce2f35109\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.218092 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-config-data-custom\") pod \"134cb80c-67f8-45ed-b602-68bce2f35109\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.218142 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-config-data\") pod \"134cb80c-67f8-45ed-b602-68bce2f35109\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.218218 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-combined-ca-bundle\") pod \"134cb80c-67f8-45ed-b602-68bce2f35109\" (UID: \"134cb80c-67f8-45ed-b602-68bce2f35109\") " Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.218506 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/134cb80c-67f8-45ed-b602-68bce2f35109-logs" (OuterVolumeSpecName: "logs") pod "134cb80c-67f8-45ed-b602-68bce2f35109" (UID: "134cb80c-67f8-45ed-b602-68bce2f35109"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.218902 4938 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/134cb80c-67f8-45ed-b602-68bce2f35109-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.218934 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/134cb80c-67f8-45ed-b602-68bce2f35109-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.223414 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-scripts" (OuterVolumeSpecName: "scripts") pod "134cb80c-67f8-45ed-b602-68bce2f35109" (UID: "134cb80c-67f8-45ed-b602-68bce2f35109"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.226384 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "134cb80c-67f8-45ed-b602-68bce2f35109" (UID: "134cb80c-67f8-45ed-b602-68bce2f35109"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.266871 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/134cb80c-67f8-45ed-b602-68bce2f35109-kube-api-access-mn2nj" (OuterVolumeSpecName: "kube-api-access-mn2nj") pod "134cb80c-67f8-45ed-b602-68bce2f35109" (UID: "134cb80c-67f8-45ed-b602-68bce2f35109"). InnerVolumeSpecName "kube-api-access-mn2nj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.320233 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mn2nj\" (UniqueName: \"kubernetes.io/projected/134cb80c-67f8-45ed-b602-68bce2f35109-kube-api-access-mn2nj\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.320274 4938 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.320286 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.323084 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "134cb80c-67f8-45ed-b602-68bce2f35109" (UID: "134cb80c-67f8-45ed-b602-68bce2f35109"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.343088 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-config-data" (OuterVolumeSpecName: "config-data") pod "134cb80c-67f8-45ed-b602-68bce2f35109" (UID: "134cb80c-67f8-45ed-b602-68bce2f35109"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.390763 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"671bac44-f424-43fe-b02d-2b148f0262aa","Type":"ContainerStarted","Data":"8abf61375c89b59a71efcb27c00c5459babdb60a897166aa56f43b84f02e9f43"} Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.399223 4938 generic.go:334] "Generic (PLEG): container finished" podID="134cb80c-67f8-45ed-b602-68bce2f35109" containerID="88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f" exitCode=137 Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.399351 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"134cb80c-67f8-45ed-b602-68bce2f35109","Type":"ContainerDied","Data":"88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f"} Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.399374 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.399399 4938 scope.go:117] "RemoveContainer" containerID="88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.399386 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"134cb80c-67f8-45ed-b602-68bce2f35109","Type":"ContainerDied","Data":"52656e4ab4ef47de18a73b7c0a738de77bc52a1a071954c9dac6e943ab59d2a4"} Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.419034 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d29dc9e2-9a34-4127-95ec-100c5483b53c","Type":"ContainerDied","Data":"3e8b2f913c7e0da1ba6d3343eb52518d64dc4758fca14b4398e9b3289fdc2c16"} Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.419288 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.422627 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.422660 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/134cb80c-67f8-45ed-b602-68bce2f35109-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.507312 4938 scope.go:117] "RemoveContainer" containerID="9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.558684 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db1f8068-a414-40bf-984e-2a8a44d2ce07" path="/var/lib/kubelet/pods/db1f8068-a414-40bf-984e-2a8a44d2ce07/volumes" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.580348 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.591818 4938 scope.go:117] "RemoveContainer" containerID="88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f" Nov 22 10:59:32 crc kubenswrapper[4938]: E1122 10:59:32.596852 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f\": container with ID starting with 88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f not found: ID does not exist" containerID="88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.596892 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f"} err="failed to get container status \"88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f\": rpc error: code = NotFound desc = could not find container \"88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f\": container with ID starting with 88469184a213c4d53bf03ec80ecc82b405c52fdfa5be24084f0719ec7627482f not found: ID does not exist" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.596935 4938 scope.go:117] "RemoveContainer" containerID="9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45" Nov 22 10:59:32 crc kubenswrapper[4938]: E1122 10:59:32.603216 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45\": container with ID starting with 9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45 not found: ID does not exist" containerID="9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.603286 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45"} err="failed to get container status \"9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45\": rpc error: code = NotFound desc = could not find container \"9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45\": container 
with ID starting with 9d602853fb76980993bf25fb5bb0262c6a402ff282b8aa644a6d89a4ccf80b45 not found: ID does not exist" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.603339 4938 scope.go:117] "RemoveContainer" containerID="fb76004ce1761d61653c70811b5cfea9222d2c30584753513dcca537b1acfe18" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.605674 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.630462 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.650346 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:59:32 crc kubenswrapper[4938]: E1122 10:59:32.650790 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="134cb80c-67f8-45ed-b602-68bce2f35109" containerName="cinder-api" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.650804 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="134cb80c-67f8-45ed-b602-68bce2f35109" containerName="cinder-api" Nov 22 10:59:32 crc kubenswrapper[4938]: E1122 10:59:32.650818 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="134cb80c-67f8-45ed-b602-68bce2f35109" containerName="cinder-api-log" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.650824 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="134cb80c-67f8-45ed-b602-68bce2f35109" containerName="cinder-api-log" Nov 22 10:59:32 crc kubenswrapper[4938]: E1122 10:59:32.650849 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d29dc9e2-9a34-4127-95ec-100c5483b53c" containerName="glance-httpd" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.650855 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="d29dc9e2-9a34-4127-95ec-100c5483b53c" containerName="glance-httpd" Nov 22 10:59:32 crc kubenswrapper[4938]: E1122 10:59:32.650864 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d29dc9e2-9a34-4127-95ec-100c5483b53c" containerName="glance-log" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.650870 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="d29dc9e2-9a34-4127-95ec-100c5483b53c" containerName="glance-log" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.651075 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="d29dc9e2-9a34-4127-95ec-100c5483b53c" containerName="glance-httpd" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.651092 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="134cb80c-67f8-45ed-b602-68bce2f35109" containerName="cinder-api-log" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.651103 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="d29dc9e2-9a34-4127-95ec-100c5483b53c" containerName="glance-log" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.651115 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="134cb80c-67f8-45ed-b602-68bce2f35109" containerName="cinder-api" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.652162 4938 scope.go:117] "RemoveContainer" containerID="ddf5625e8f5b75203a221aaefdde98986607e3019b1c39cfec3f0949198e39a6" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.652906 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.659008 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.659309 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.663625 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.688982 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.702484 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.712970 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.717295 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.719661 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.720643 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.720794 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.722627 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.730728 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdzhg\" (UniqueName: \"kubernetes.io/projected/8585da6c-5a29-4a79-9aa4-5385381dfd08-kube-api-access-bdzhg\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.730770 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8585da6c-5a29-4a79-9aa4-5385381dfd08-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.730805 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8585da6c-5a29-4a79-9aa4-5385381dfd08-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.730830 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 
10:59:32.730896 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8585da6c-5a29-4a79-9aa4-5385381dfd08-logs\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.730941 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8585da6c-5a29-4a79-9aa4-5385381dfd08-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.730963 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8585da6c-5a29-4a79-9aa4-5385381dfd08-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.731122 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8585da6c-5a29-4a79-9aa4-5385381dfd08-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.832611 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8585da6c-5a29-4a79-9aa4-5385381dfd08-logs\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.832674 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8585da6c-5a29-4a79-9aa4-5385381dfd08-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.832700 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8585da6c-5a29-4a79-9aa4-5385381dfd08-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.832764 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.832817 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-scripts\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.832858 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-4sp2s\" (UniqueName: \"kubernetes.io/projected/2048ffcd-1faf-44c6-a1e6-425501f44282-kube-api-access-4sp2s\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.832886 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.832983 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-config-data-custom\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.833018 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-config-data\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.833050 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8585da6c-5a29-4a79-9aa4-5385381dfd08-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.833083 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdzhg\" (UniqueName: \"kubernetes.io/projected/8585da6c-5a29-4a79-9aa4-5385381dfd08-kube-api-access-bdzhg\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.833153 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8585da6c-5a29-4a79-9aa4-5385381dfd08-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.833199 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8585da6c-5a29-4a79-9aa4-5385381dfd08-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.833222 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2048ffcd-1faf-44c6-a1e6-425501f44282-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.833249 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.833276 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.833310 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2048ffcd-1faf-44c6-a1e6-425501f44282-logs\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.833823 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8585da6c-5a29-4a79-9aa4-5385381dfd08-logs\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.834853 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8585da6c-5a29-4a79-9aa4-5385381dfd08-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.836616 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.840857 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8585da6c-5a29-4a79-9aa4-5385381dfd08-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.841659 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8585da6c-5a29-4a79-9aa4-5385381dfd08-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.853056 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8585da6c-5a29-4a79-9aa4-5385381dfd08-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.859511 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8585da6c-5a29-4a79-9aa4-5385381dfd08-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " 
pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.870252 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdzhg\" (UniqueName: \"kubernetes.io/projected/8585da6c-5a29-4a79-9aa4-5385381dfd08-kube-api-access-bdzhg\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.878799 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"8585da6c-5a29-4a79-9aa4-5385381dfd08\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.883793 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.936444 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sp2s\" (UniqueName: \"kubernetes.io/projected/2048ffcd-1faf-44c6-a1e6-425501f44282-kube-api-access-4sp2s\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.936612 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.936688 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-config-data-custom\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.936724 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-config-data\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.936808 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2048ffcd-1faf-44c6-a1e6-425501f44282-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.936832 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.936876 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2048ffcd-1faf-44c6-a1e6-425501f44282-logs\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.937017 4938 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.937073 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-scripts\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.947150 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2048ffcd-1faf-44c6-a1e6-425501f44282-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.947726 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2048ffcd-1faf-44c6-a1e6-425501f44282-logs\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.950989 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-public-tls-certs\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.951012 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-config-data-custom\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.951322 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-scripts\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.951658 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.955703 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-config-data\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.961021 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2048ffcd-1faf-44c6-a1e6-425501f44282-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.972503 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sp2s\" (UniqueName: 
\"kubernetes.io/projected/2048ffcd-1faf-44c6-a1e6-425501f44282-kube-api-access-4sp2s\") pod \"cinder-api-0\" (UID: \"2048ffcd-1faf-44c6-a1e6-425501f44282\") " pod="openstack/cinder-api-0" Nov 22 10:59:32 crc kubenswrapper[4938]: I1122 10:59:32.985559 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:33 crc kubenswrapper[4938]: I1122 10:59:33.041798 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 10:59:33 crc kubenswrapper[4938]: I1122 10:59:33.452642 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e","Type":"ContainerStarted","Data":"462bd111600785ab27089735af190330af6b54beacc88f9563a7cf6d9dc884aa"} Nov 22 10:59:33 crc kubenswrapper[4938]: I1122 10:59:33.454646 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"671bac44-f424-43fe-b02d-2b148f0262aa","Type":"ContainerStarted","Data":"9441a81dac48e3d0087fdbc5ea56f149511758abd8cd481f042f160b01cbe769"} Nov 22 10:59:33 crc kubenswrapper[4938]: I1122 10:59:33.508220 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 22 10:59:33 crc kubenswrapper[4938]: I1122 10:59:33.588612 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:59:33 crc kubenswrapper[4938]: I1122 10:59:33.699186 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:59:33 crc kubenswrapper[4938]: W1122 10:59:33.701060 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2048ffcd_1faf_44c6_a1e6_425501f44282.slice/crio-3624577c015b27e1098cd8a7a72f718f5d1744287ffae65dddbf0a15ec733eab WatchSource:0}: Error finding container 3624577c015b27e1098cd8a7a72f718f5d1744287ffae65dddbf0a15ec733eab: Status 404 returned error can't find the container with id 3624577c015b27e1098cd8a7a72f718f5d1744287ffae65dddbf0a15ec733eab Nov 22 10:59:33 crc kubenswrapper[4938]: I1122 10:59:33.848374 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 22 10:59:34 crc kubenswrapper[4938]: I1122 10:59:34.472944 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="134cb80c-67f8-45ed-b602-68bce2f35109" path="/var/lib/kubelet/pods/134cb80c-67f8-45ed-b602-68bce2f35109/volumes" Nov 22 10:59:34 crc kubenswrapper[4938]: I1122 10:59:34.474803 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d29dc9e2-9a34-4127-95ec-100c5483b53c" path="/var/lib/kubelet/pods/d29dc9e2-9a34-4127-95ec-100c5483b53c/volumes" Nov 22 10:59:34 crc kubenswrapper[4938]: I1122 10:59:34.491935 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e","Type":"ContainerStarted","Data":"39cf14b4b0930955e61c92c2b61f872711d856df1908c3f034a93c48913f9092"} Nov 22 10:59:34 crc kubenswrapper[4938]: I1122 10:59:34.495596 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2048ffcd-1faf-44c6-a1e6-425501f44282","Type":"ContainerStarted","Data":"3624577c015b27e1098cd8a7a72f718f5d1744287ffae65dddbf0a15ec733eab"} Nov 22 10:59:34 crc kubenswrapper[4938]: I1122 10:59:34.514366 4938 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8585da6c-5a29-4a79-9aa4-5385381dfd08","Type":"ContainerStarted","Data":"c24ae269ebcac8ac0954a9e93a37a9201b4af15436e1d00649e2026fbcb186a0"}
Nov 22 10:59:34 crc kubenswrapper[4938]: I1122 10:59:34.514444 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8585da6c-5a29-4a79-9aa4-5385381dfd08","Type":"ContainerStarted","Data":"890e304c241b4a28f963386e175c3c18dd81038e190089d43c257c56ddbe9f04"}
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.015980 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-3f44-account-create-vhdtx"]
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.017725 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3f44-account-create-vhdtx"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.020594 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.028790 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-3f44-account-create-vhdtx"]
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.099030 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2kjt\" (UniqueName: \"kubernetes.io/projected/9d38d750-11b4-4d05-a62b-86eb84910f96-kube-api-access-l2kjt\") pod \"nova-api-3f44-account-create-vhdtx\" (UID: \"9d38d750-11b4-4d05-a62b-86eb84910f96\") " pod="openstack/nova-api-3f44-account-create-vhdtx"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.210581 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2kjt\" (UniqueName: \"kubernetes.io/projected/9d38d750-11b4-4d05-a62b-86eb84910f96-kube-api-access-l2kjt\") pod \"nova-api-3f44-account-create-vhdtx\" (UID: \"9d38d750-11b4-4d05-a62b-86eb84910f96\") " pod="openstack/nova-api-3f44-account-create-vhdtx"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.216322 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-b693-account-create-rgkl7"]
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.217515 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-b693-account-create-rgkl7"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.234317 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.250099 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2kjt\" (UniqueName: \"kubernetes.io/projected/9d38d750-11b4-4d05-a62b-86eb84910f96-kube-api-access-l2kjt\") pod \"nova-api-3f44-account-create-vhdtx\" (UID: \"9d38d750-11b4-4d05-a62b-86eb84910f96\") " pod="openstack/nova-api-3f44-account-create-vhdtx"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.255003 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-b693-account-create-rgkl7"]
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.313562 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdwd9\" (UniqueName: \"kubernetes.io/projected/fca19b3c-65ae-4c37-99c6-bc28789b64fe-kube-api-access-qdwd9\") pod \"nova-cell0-b693-account-create-rgkl7\" (UID: \"fca19b3c-65ae-4c37-99c6-bc28789b64fe\") " pod="openstack/nova-cell0-b693-account-create-rgkl7"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.327446 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7768cc7979-hrkwz"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.349899 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3f44-account-create-vhdtx"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.408548 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-798b4b7d-4sxkg"]
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.408778 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-798b4b7d-4sxkg" podUID="78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" containerName="neutron-api" containerID="cri-o://d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e" gracePeriod=30
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.409180 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-798b4b7d-4sxkg" podUID="78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" containerName="neutron-httpd" containerID="cri-o://49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa" gracePeriod=30
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.417826 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdwd9\" (UniqueName: \"kubernetes.io/projected/fca19b3c-65ae-4c37-99c6-bc28789b64fe-kube-api-access-qdwd9\") pod \"nova-cell0-b693-account-create-rgkl7\" (UID: \"fca19b3c-65ae-4c37-99c6-bc28789b64fe\") " pod="openstack/nova-cell0-b693-account-create-rgkl7"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.461874 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdwd9\" (UniqueName: \"kubernetes.io/projected/fca19b3c-65ae-4c37-99c6-bc28789b64fe-kube-api-access-qdwd9\") pod \"nova-cell0-b693-account-create-rgkl7\" (UID: \"fca19b3c-65ae-4c37-99c6-bc28789b64fe\") " pod="openstack/nova-cell0-b693-account-create-rgkl7"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.463064 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-3e6e-account-create-wpvvp"]
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.465180 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-3e6e-account-create-wpvvp"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.469285 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.471734 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-3e6e-account-create-wpvvp"]
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.551123 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"671bac44-f424-43fe-b02d-2b148f0262aa","Type":"ContainerStarted","Data":"df4cde02d808cab51177e533bc3ec59f9b794a734ff239d016137c93bd4887a7"}
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.562512 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8585da6c-5a29-4a79-9aa4-5385381dfd08","Type":"ContainerStarted","Data":"db7befd164009469c1ccc3b7cecac840875d5f5db33996532400152db4d20906"}
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.568159 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"daf50ab9-a17b-4d53-a2f5-a1f11ed8455e","Type":"ContainerStarted","Data":"9ad7cd9ee3bf4c162187d6fae2b1afc091e882df959ea055fe0a0b92929fb38d"}
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.570335 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2048ffcd-1faf-44c6-a1e6-425501f44282","Type":"ContainerStarted","Data":"aa752b91938536b970e6e494543ac7013618b18e369767d9b166609ac27720c9"}
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.570370 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2048ffcd-1faf-44c6-a1e6-425501f44282","Type":"ContainerStarted","Data":"d47f46962ff7d9e2103c119cd4b7cdeafa35c2b03f847b1ac6cd25780f866286"}
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.570609 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.618669 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-b693-account-create-rgkl7"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.634054 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pdfp\" (UniqueName: \"kubernetes.io/projected/91833636-6232-43e0-96e4-13ab0be6dbbe-kube-api-access-4pdfp\") pod \"nova-cell1-3e6e-account-create-wpvvp\" (UID: \"91833636-6232-43e0-96e4-13ab0be6dbbe\") " pod="openstack/nova-cell1-3e6e-account-create-wpvvp"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.682868 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.682846219 podStartE2EDuration="3.682846219s" podCreationTimestamp="2025-11-22 10:59:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:35.585435654 +0000 UTC m=+1308.053273053" watchObservedRunningTime="2025-11-22 10:59:35.682846219 +0000 UTC m=+1308.150683618"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.728187 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.728166282 podStartE2EDuration="3.728166282s" podCreationTimestamp="2025-11-22 10:59:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:35.633801513 +0000 UTC m=+1308.101638902" watchObservedRunningTime="2025-11-22 10:59:35.728166282 +0000 UTC m=+1308.196003681"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.732376 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.732364436 podStartE2EDuration="4.732364436s" podCreationTimestamp="2025-11-22 10:59:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:35.656471755 +0000 UTC m=+1308.124309154" watchObservedRunningTime="2025-11-22 10:59:35.732364436 +0000 UTC m=+1308.200201825"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.736847 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pdfp\" (UniqueName: \"kubernetes.io/projected/91833636-6232-43e0-96e4-13ab0be6dbbe-kube-api-access-4pdfp\") pod \"nova-cell1-3e6e-account-create-wpvvp\" (UID: \"91833636-6232-43e0-96e4-13ab0be6dbbe\") " pod="openstack/nova-cell1-3e6e-account-create-wpvvp"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.762502 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pdfp\" (UniqueName: \"kubernetes.io/projected/91833636-6232-43e0-96e4-13ab0be6dbbe-kube-api-access-4pdfp\") pod \"nova-cell1-3e6e-account-create-wpvvp\" (UID: \"91833636-6232-43e0-96e4-13ab0be6dbbe\") " pod="openstack/nova-cell1-3e6e-account-create-wpvvp"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.864725 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-3e6e-account-create-wpvvp"
Nov 22 10:59:35 crc kubenswrapper[4938]: I1122 10:59:35.957501 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-3f44-account-create-vhdtx"]
Nov 22 10:59:35 crc kubenswrapper[4938]: W1122 10:59:35.976784 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d38d750_11b4_4d05_a62b_86eb84910f96.slice/crio-612d0c29cb09fa49d5296e81ba17ee350167910b098ff7b068d1684e293d923b WatchSource:0}: Error finding container 612d0c29cb09fa49d5296e81ba17ee350167910b098ff7b068d1684e293d923b: Status 404 returned error can't find the container with id 612d0c29cb09fa49d5296e81ba17ee350167910b098ff7b068d1684e293d923b
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.218957 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-b693-account-create-rgkl7"]
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.460231 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-3e6e-account-create-wpvvp"]
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.594844 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3e6e-account-create-wpvvp" event={"ID":"91833636-6232-43e0-96e4-13ab0be6dbbe","Type":"ContainerStarted","Data":"3eecdc59eb636ee01da9a7ca8b1fa25390cb715e1f9ee5e378e3e1e1cbf35a2d"}
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.598287 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"671bac44-f424-43fe-b02d-2b148f0262aa","Type":"ContainerStarted","Data":"824fcf044ae440da347b138fa196a3ce4e47766759263b4ec1c17e11ec49b1c1"}
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.598442 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="ceilometer-central-agent" containerID="cri-o://8abf61375c89b59a71efcb27c00c5459babdb60a897166aa56f43b84f02e9f43" gracePeriod=30
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.598705 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.598982 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="proxy-httpd" containerID="cri-o://824fcf044ae440da347b138fa196a3ce4e47766759263b4ec1c17e11ec49b1c1" gracePeriod=30
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.599044 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="sg-core" containerID="cri-o://df4cde02d808cab51177e533bc3ec59f9b794a734ff239d016137c93bd4887a7" gracePeriod=30
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.599124 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="ceilometer-notification-agent" containerID="cri-o://9441a81dac48e3d0087fdbc5ea56f149511758abd8cd481f042f160b01cbe769" gracePeriod=30
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.606235 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-b693-account-create-rgkl7" event={"ID":"fca19b3c-65ae-4c37-99c6-bc28789b64fe","Type":"ContainerStarted","Data":"4f4d97eda9b4f8c52b41dd2d7497364cf732b981472e439d69e60419c80776f8"}
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.606277 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-b693-account-create-rgkl7" event={"ID":"fca19b3c-65ae-4c37-99c6-bc28789b64fe","Type":"ContainerStarted","Data":"21492540e8c17ffdeb4f732d8fe1c5f25fd51d08b364a4a264c611ee1a4a54e8"}
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.609838 4938 generic.go:334] "Generic (PLEG): container finished" podID="9d38d750-11b4-4d05-a62b-86eb84910f96" containerID="a3804884fcea569815fe4083170fa6ea6f9a2c8270bcbd6dfb14a40a296af1d2" exitCode=0
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.609970 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3f44-account-create-vhdtx" event={"ID":"9d38d750-11b4-4d05-a62b-86eb84910f96","Type":"ContainerDied","Data":"a3804884fcea569815fe4083170fa6ea6f9a2c8270bcbd6dfb14a40a296af1d2"}
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.609995 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3f44-account-create-vhdtx" event={"ID":"9d38d750-11b4-4d05-a62b-86eb84910f96","Type":"ContainerStarted","Data":"612d0c29cb09fa49d5296e81ba17ee350167910b098ff7b068d1684e293d923b"}
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.615934 4938 generic.go:334] "Generic (PLEG): container finished" podID="78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" containerID="49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa" exitCode=0
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.616779 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-798b4b7d-4sxkg" event={"ID":"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4","Type":"ContainerDied","Data":"49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa"}
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.632694 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.077096156 podStartE2EDuration="6.63262739s" podCreationTimestamp="2025-11-22 10:59:30 +0000 UTC" firstStartedPulling="2025-11-22 10:59:31.172789182 +0000 UTC m=+1303.640626581" lastFinishedPulling="2025-11-22 10:59:35.728320416 +0000 UTC m=+1308.196157815" observedRunningTime="2025-11-22 10:59:36.625061153 +0000 UTC m=+1309.092898552" watchObservedRunningTime="2025-11-22 10:59:36.63262739 +0000 UTC m=+1309.100464789"
Nov 22 10:59:36 crc kubenswrapper[4938]: I1122 10:59:36.656422 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-b693-account-create-rgkl7" podStartSLOduration=1.6564025500000001 podStartE2EDuration="1.65640255s" podCreationTimestamp="2025-11-22 10:59:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:36.655734853 +0000 UTC m=+1309.123572252" watchObservedRunningTime="2025-11-22 10:59:36.65640255 +0000 UTC m=+1309.124239949"
Nov 22 10:59:37 crc kubenswrapper[4938]: E1122 10:59:37.140363 4938 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod671bac44_f424_43fe_b02d_2b148f0262aa.slice/crio-9441a81dac48e3d0087fdbc5ea56f149511758abd8cd481f042f160b01cbe769.scope\": RecentStats: unable to find data in memory cache]"
Nov 22 10:59:37 crc kubenswrapper[4938]: I1122 10:59:37.626966 4938 generic.go:334] "Generic (PLEG): container finished" podID="91833636-6232-43e0-96e4-13ab0be6dbbe" containerID="b7ed6d27b471bd500f9c6013c674caf20708f72a3265ed9b7f1794d10886d0bd" exitCode=0
Nov 22 10:59:37 crc kubenswrapper[4938]: I1122 10:59:37.627140 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3e6e-account-create-wpvvp" event={"ID":"91833636-6232-43e0-96e4-13ab0be6dbbe","Type":"ContainerDied","Data":"b7ed6d27b471bd500f9c6013c674caf20708f72a3265ed9b7f1794d10886d0bd"}
Nov 22 10:59:37 crc kubenswrapper[4938]: I1122 10:59:37.630275 4938 generic.go:334] "Generic (PLEG): container finished" podID="671bac44-f424-43fe-b02d-2b148f0262aa" containerID="824fcf044ae440da347b138fa196a3ce4e47766759263b4ec1c17e11ec49b1c1" exitCode=0
Nov 22 10:59:37 crc kubenswrapper[4938]: I1122 10:59:37.630319 4938 generic.go:334] "Generic (PLEG): container finished" podID="671bac44-f424-43fe-b02d-2b148f0262aa" containerID="df4cde02d808cab51177e533bc3ec59f9b794a734ff239d016137c93bd4887a7" exitCode=2
Nov 22 10:59:37 crc kubenswrapper[4938]: I1122 10:59:37.630332 4938 generic.go:334] "Generic (PLEG): container finished" podID="671bac44-f424-43fe-b02d-2b148f0262aa" containerID="9441a81dac48e3d0087fdbc5ea56f149511758abd8cd481f042f160b01cbe769" exitCode=0
Nov 22 10:59:37 crc kubenswrapper[4938]: I1122 10:59:37.630311 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"671bac44-f424-43fe-b02d-2b148f0262aa","Type":"ContainerDied","Data":"824fcf044ae440da347b138fa196a3ce4e47766759263b4ec1c17e11ec49b1c1"}
Nov 22 10:59:37 crc kubenswrapper[4938]: I1122 10:59:37.630430 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"671bac44-f424-43fe-b02d-2b148f0262aa","Type":"ContainerDied","Data":"df4cde02d808cab51177e533bc3ec59f9b794a734ff239d016137c93bd4887a7"}
Nov 22 10:59:37 crc kubenswrapper[4938]: I1122 10:59:37.630445 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"671bac44-f424-43fe-b02d-2b148f0262aa","Type":"ContainerDied","Data":"9441a81dac48e3d0087fdbc5ea56f149511758abd8cd481f042f160b01cbe769"}
Nov 22 10:59:37 crc kubenswrapper[4938]: I1122 10:59:37.632767 4938 generic.go:334] "Generic (PLEG): container finished" podID="fca19b3c-65ae-4c37-99c6-bc28789b64fe" containerID="4f4d97eda9b4f8c52b41dd2d7497364cf732b981472e439d69e60419c80776f8" exitCode=0
Nov 22 10:59:37 crc kubenswrapper[4938]: I1122 10:59:37.632796 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-b693-account-create-rgkl7" event={"ID":"fca19b3c-65ae-4c37-99c6-bc28789b64fe","Type":"ContainerDied","Data":"4f4d97eda9b4f8c52b41dd2d7497364cf732b981472e439d69e60419c80776f8"}
Nov 22 10:59:37 crc kubenswrapper[4938]: I1122 10:59:37.978686 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3f44-account-create-vhdtx"
Nov 22 10:59:38 crc kubenswrapper[4938]: I1122 10:59:38.100343 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2kjt\" (UniqueName: \"kubernetes.io/projected/9d38d750-11b4-4d05-a62b-86eb84910f96-kube-api-access-l2kjt\") pod \"9d38d750-11b4-4d05-a62b-86eb84910f96\" (UID: \"9d38d750-11b4-4d05-a62b-86eb84910f96\") "
Nov 22 10:59:38 crc kubenswrapper[4938]: I1122 10:59:38.117881 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d38d750-11b4-4d05-a62b-86eb84910f96-kube-api-access-l2kjt" (OuterVolumeSpecName: "kube-api-access-l2kjt") pod "9d38d750-11b4-4d05-a62b-86eb84910f96" (UID: "9d38d750-11b4-4d05-a62b-86eb84910f96"). InnerVolumeSpecName "kube-api-access-l2kjt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:59:38 crc kubenswrapper[4938]: I1122 10:59:38.202982 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2kjt\" (UniqueName: \"kubernetes.io/projected/9d38d750-11b4-4d05-a62b-86eb84910f96-kube-api-access-l2kjt\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:38 crc kubenswrapper[4938]: I1122 10:59:38.642831 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3f44-account-create-vhdtx" event={"ID":"9d38d750-11b4-4d05-a62b-86eb84910f96","Type":"ContainerDied","Data":"612d0c29cb09fa49d5296e81ba17ee350167910b098ff7b068d1684e293d923b"}
Nov 22 10:59:38 crc kubenswrapper[4938]: I1122 10:59:38.644738 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="612d0c29cb09fa49d5296e81ba17ee350167910b098ff7b068d1684e293d923b"
Nov 22 10:59:38 crc kubenswrapper[4938]: I1122 10:59:38.643002 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3f44-account-create-vhdtx"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.112807 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-b693-account-create-rgkl7"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.121049 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-3e6e-account-create-wpvvp"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.226751 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdwd9\" (UniqueName: \"kubernetes.io/projected/fca19b3c-65ae-4c37-99c6-bc28789b64fe-kube-api-access-qdwd9\") pod \"fca19b3c-65ae-4c37-99c6-bc28789b64fe\" (UID: \"fca19b3c-65ae-4c37-99c6-bc28789b64fe\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.227160 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pdfp\" (UniqueName: \"kubernetes.io/projected/91833636-6232-43e0-96e4-13ab0be6dbbe-kube-api-access-4pdfp\") pod \"91833636-6232-43e0-96e4-13ab0be6dbbe\" (UID: \"91833636-6232-43e0-96e4-13ab0be6dbbe\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.238587 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fca19b3c-65ae-4c37-99c6-bc28789b64fe-kube-api-access-qdwd9" (OuterVolumeSpecName: "kube-api-access-qdwd9") pod "fca19b3c-65ae-4c37-99c6-bc28789b64fe" (UID: "fca19b3c-65ae-4c37-99c6-bc28789b64fe"). InnerVolumeSpecName "kube-api-access-qdwd9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.244960 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91833636-6232-43e0-96e4-13ab0be6dbbe-kube-api-access-4pdfp" (OuterVolumeSpecName: "kube-api-access-4pdfp") pod "91833636-6232-43e0-96e4-13ab0be6dbbe" (UID: "91833636-6232-43e0-96e4-13ab0be6dbbe"). InnerVolumeSpecName "kube-api-access-4pdfp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.330562 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pdfp\" (UniqueName: \"kubernetes.io/projected/91833636-6232-43e0-96e4-13ab0be6dbbe-kube-api-access-4pdfp\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.330596 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdwd9\" (UniqueName: \"kubernetes.io/projected/fca19b3c-65ae-4c37-99c6-bc28789b64fe-kube-api-access-qdwd9\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.423233 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-798b4b7d-4sxkg"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.533727 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-httpd-config\") pod \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.533924 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-config\") pod \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.534018 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xmft\" (UniqueName: \"kubernetes.io/projected/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-kube-api-access-9xmft\") pod \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.534077 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-ovndb-tls-certs\") pod \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.534383 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-combined-ca-bundle\") pod \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\" (UID: \"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.536517 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" (UID: "78c4b6bb-4906-4866-b45d-1ad4d3a95ce4"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.539743 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-kube-api-access-9xmft" (OuterVolumeSpecName: "kube-api-access-9xmft") pod "78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" (UID: "78c4b6bb-4906-4866-b45d-1ad4d3a95ce4"). InnerVolumeSpecName "kube-api-access-9xmft". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.582071 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" (UID: "78c4b6bb-4906-4866-b45d-1ad4d3a95ce4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.596385 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-config" (OuterVolumeSpecName: "config") pod "78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" (UID: "78c4b6bb-4906-4866-b45d-1ad4d3a95ce4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.612375 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" (UID: "78c4b6bb-4906-4866-b45d-1ad4d3a95ce4"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.637275 4938 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-httpd-config\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.637317 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-config\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.637331 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xmft\" (UniqueName: \"kubernetes.io/projected/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-kube-api-access-9xmft\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.637348 4938 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.637360 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.655841 4938 generic.go:334] "Generic (PLEG): container finished" podID="671bac44-f424-43fe-b02d-2b148f0262aa" containerID="8abf61375c89b59a71efcb27c00c5459babdb60a897166aa56f43b84f02e9f43" exitCode=0
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.655939 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"671bac44-f424-43fe-b02d-2b148f0262aa","Type":"ContainerDied","Data":"8abf61375c89b59a71efcb27c00c5459babdb60a897166aa56f43b84f02e9f43"}
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.659149 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-b693-account-create-rgkl7" event={"ID":"fca19b3c-65ae-4c37-99c6-bc28789b64fe","Type":"ContainerDied","Data":"21492540e8c17ffdeb4f732d8fe1c5f25fd51d08b364a4a264c611ee1a4a54e8"}
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.659172 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-b693-account-create-rgkl7"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.659182 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21492540e8c17ffdeb4f732d8fe1c5f25fd51d08b364a4a264c611ee1a4a54e8"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.661281 4938 generic.go:334] "Generic (PLEG): container finished" podID="78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" containerID="d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e" exitCode=0
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.661309 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-798b4b7d-4sxkg" event={"ID":"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4","Type":"ContainerDied","Data":"d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e"}
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.661329 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-798b4b7d-4sxkg"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.661352 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-798b4b7d-4sxkg" event={"ID":"78c4b6bb-4906-4866-b45d-1ad4d3a95ce4","Type":"ContainerDied","Data":"ba64b361a104eccb7bef3c959487d66452b4827dea31a49c3d4b019fd06e7abb"}
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.661374 4938 scope.go:117] "RemoveContainer" containerID="49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.665798 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3e6e-account-create-wpvvp" event={"ID":"91833636-6232-43e0-96e4-13ab0be6dbbe","Type":"ContainerDied","Data":"3eecdc59eb636ee01da9a7ca8b1fa25390cb715e1f9ee5e378e3e1e1cbf35a2d"}
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.665833 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3eecdc59eb636ee01da9a7ca8b1fa25390cb715e1f9ee5e378e3e1e1cbf35a2d"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.665882 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-3e6e-account-create-wpvvp"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.689204 4938 scope.go:117] "RemoveContainer" containerID="d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.706584 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-798b4b7d-4sxkg"]
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.716662 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-798b4b7d-4sxkg"]
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.721827 4938 scope.go:117] "RemoveContainer" containerID="49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa"
Nov 22 10:59:39 crc kubenswrapper[4938]: E1122 10:59:39.725262 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa\": container with ID starting with 49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa not found: ID does not exist" containerID="49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.725296 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa"} err="failed to get container status \"49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa\": rpc error: code = NotFound desc = could not find container \"49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa\": container with ID starting with 49085e7167e26979fe51ecec2e7cbfa8635cf866383127fb375517f4b12675aa not found: ID does not exist"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.725322 4938 scope.go:117] "RemoveContainer" containerID="d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e"
Nov 22 10:59:39 crc kubenswrapper[4938]: E1122 10:59:39.725627 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e\": container with ID starting with d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e not found: ID does not exist" containerID="d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.725662 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e"} err="failed to get container status \"d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e\": rpc error: code = NotFound desc = could not find container \"d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e\": container with ID starting with d149ba08ac2e15b8dcf98420395ba79af427ed40ca2d197d3d2be3a4b599ff2e not found: ID does not exist"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.832688 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.942185 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-combined-ca-bundle\") pod \"671bac44-f424-43fe-b02d-2b148f0262aa\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.942620 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-config-data\") pod \"671bac44-f424-43fe-b02d-2b148f0262aa\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.942665 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/671bac44-f424-43fe-b02d-2b148f0262aa-log-httpd\") pod \"671bac44-f424-43fe-b02d-2b148f0262aa\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.942719 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-scripts\") pod \"671bac44-f424-43fe-b02d-2b148f0262aa\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.942801 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-sg-core-conf-yaml\") pod \"671bac44-f424-43fe-b02d-2b148f0262aa\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.942886 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-ceilometer-tls-certs\") pod \"671bac44-f424-43fe-b02d-2b148f0262aa\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.942947 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7765l\" (UniqueName: \"kubernetes.io/projected/671bac44-f424-43fe-b02d-2b148f0262aa-kube-api-access-7765l\") pod \"671bac44-f424-43fe-b02d-2b148f0262aa\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.942985 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/671bac44-f424-43fe-b02d-2b148f0262aa-run-httpd\") pod \"671bac44-f424-43fe-b02d-2b148f0262aa\" (UID: \"671bac44-f424-43fe-b02d-2b148f0262aa\") "
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.943276 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/671bac44-f424-43fe-b02d-2b148f0262aa-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "671bac44-f424-43fe-b02d-2b148f0262aa" (UID: "671bac44-f424-43fe-b02d-2b148f0262aa"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.943521 4938 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/671bac44-f424-43fe-b02d-2b148f0262aa-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.943668 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/671bac44-f424-43fe-b02d-2b148f0262aa-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "671bac44-f424-43fe-b02d-2b148f0262aa" (UID: "671bac44-f424-43fe-b02d-2b148f0262aa"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.946124 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-scripts" (OuterVolumeSpecName: "scripts") pod "671bac44-f424-43fe-b02d-2b148f0262aa" (UID: "671bac44-f424-43fe-b02d-2b148f0262aa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.948997 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/671bac44-f424-43fe-b02d-2b148f0262aa-kube-api-access-7765l" (OuterVolumeSpecName: "kube-api-access-7765l") pod "671bac44-f424-43fe-b02d-2b148f0262aa" (UID: "671bac44-f424-43fe-b02d-2b148f0262aa"). InnerVolumeSpecName "kube-api-access-7765l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.972602 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "671bac44-f424-43fe-b02d-2b148f0262aa" (UID: "671bac44-f424-43fe-b02d-2b148f0262aa"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:39 crc kubenswrapper[4938]: I1122 10:59:39.999340 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "671bac44-f424-43fe-b02d-2b148f0262aa" (UID: "671bac44-f424-43fe-b02d-2b148f0262aa"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.023552 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "671bac44-f424-43fe-b02d-2b148f0262aa" (UID: "671bac44-f424-43fe-b02d-2b148f0262aa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.043319 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-config-data" (OuterVolumeSpecName: "config-data") pod "671bac44-f424-43fe-b02d-2b148f0262aa" (UID: "671bac44-f424-43fe-b02d-2b148f0262aa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.045548 4938 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.045587 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7765l\" (UniqueName: \"kubernetes.io/projected/671bac44-f424-43fe-b02d-2b148f0262aa-kube-api-access-7765l\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.045603 4938 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/671bac44-f424-43fe-b02d-2b148f0262aa-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.045617 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.045630 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.045641 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.045653 4938 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/671bac44-f424-43fe-b02d-2b148f0262aa-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.464572 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" path="/var/lib/kubelet/pods/78c4b6bb-4906-4866-b45d-1ad4d3a95ce4/volumes"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.680163 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"671bac44-f424-43fe-b02d-2b148f0262aa","Type":"ContainerDied","Data":"522e30d2a37155ee937e79543e0e8d1999a0ce21dd65044e0fb4f8aec7e6cf1a"}
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.680207 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.680221 4938 scope.go:117] "RemoveContainer" containerID="824fcf044ae440da347b138fa196a3ce4e47766759263b4ec1c17e11ec49b1c1"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.702792 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.703296 4938 scope.go:117] "RemoveContainer" containerID="df4cde02d808cab51177e533bc3ec59f9b794a734ff239d016137c93bd4887a7"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.719067 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.723295 4938 scope.go:117] "RemoveContainer" containerID="9441a81dac48e3d0087fdbc5ea56f149511758abd8cd481f042f160b01cbe769"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.728647 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 22 10:59:40 crc kubenswrapper[4938]: E1122 10:59:40.729573 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="ceilometer-central-agent"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.729597 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="ceilometer-central-agent"
Nov 22 10:59:40 crc kubenswrapper[4938]: E1122 10:59:40.729637 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fca19b3c-65ae-4c37-99c6-bc28789b64fe" containerName="mariadb-account-create"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.729644 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="fca19b3c-65ae-4c37-99c6-bc28789b64fe" containerName="mariadb-account-create"
Nov 22 10:59:40 crc kubenswrapper[4938]: E1122 10:59:40.729661 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="sg-core"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.729667 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="sg-core"
Nov 22 10:59:40 crc kubenswrapper[4938]: E1122 10:59:40.729685 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d38d750-11b4-4d05-a62b-86eb84910f96" containerName="mariadb-account-create"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.729692 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d38d750-11b4-4d05-a62b-86eb84910f96" containerName="mariadb-account-create"
Nov 22 10:59:40 crc kubenswrapper[4938]: E1122 10:59:40.729772 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91833636-6232-43e0-96e4-13ab0be6dbbe" containerName="mariadb-account-create"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.729799 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="91833636-6232-43e0-96e4-13ab0be6dbbe" containerName="mariadb-account-create"
Nov 22 10:59:40 crc kubenswrapper[4938]: E1122 10:59:40.729835 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" containerName="neutron-httpd"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.729841 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" containerName="neutron-httpd"
Nov 22 10:59:40 crc kubenswrapper[4938]: E1122 10:59:40.729880 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="ceilometer-notification-agent"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.729888 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="ceilometer-notification-agent"
Nov 22 10:59:40 crc kubenswrapper[4938]: E1122 10:59:40.729941 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="proxy-httpd"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.729948 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="proxy-httpd"
Nov 22 10:59:40 crc kubenswrapper[4938]: E1122 10:59:40.729967 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" containerName="neutron-api"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.729976 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" containerName="neutron-api"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.730843 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="proxy-httpd"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.730879 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="ceilometer-central-agent"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.730901 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d38d750-11b4-4d05-a62b-86eb84910f96" containerName="mariadb-account-create"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.730932 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" containerName="neutron-api"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.730941 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="fca19b3c-65ae-4c37-99c6-bc28789b64fe" containerName="mariadb-account-create"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.730956 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="91833636-6232-43e0-96e4-13ab0be6dbbe" containerName="mariadb-account-create"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.730965 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="78c4b6bb-4906-4866-b45d-1ad4d3a95ce4" containerName="neutron-httpd"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.730983 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="ceilometer-notification-agent"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.730999 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" containerName="sg-core"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.739525 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.750471 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.750768 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.751444 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.760832 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-config-data\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.762833 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/797c3455-d508-4258-942c-1fe85abd0bcb-run-httpd\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.762884 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrwsk\" (UniqueName: \"kubernetes.io/projected/797c3455-d508-4258-942c-1fe85abd0bcb-kube-api-access-jrwsk\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.762955 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-scripts\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.762982 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.763242 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.763295 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.763330 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/797c3455-d508-4258-942c-1fe85abd0bcb-log-httpd\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.769165 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.771101 4938 scope.go:117] "RemoveContainer" containerID="8abf61375c89b59a71efcb27c00c5459babdb60a897166aa56f43b84f02e9f43"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.864842 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.864894 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.864932 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/797c3455-d508-4258-942c-1fe85abd0bcb-log-httpd\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.864982 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-config-data\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.865008 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/797c3455-d508-4258-942c-1fe85abd0bcb-run-httpd\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.865030 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrwsk\" (UniqueName: \"kubernetes.io/projected/797c3455-d508-4258-942c-1fe85abd0bcb-kube-api-access-jrwsk\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.865050 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-scripts\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.865067 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.867282 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/797c3455-d508-4258-942c-1fe85abd0bcb-run-httpd\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.870649 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/797c3455-d508-4258-942c-1fe85abd0bcb-log-httpd\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.871120 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.871441 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-scripts\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.871458 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.874535 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.876069 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-config-data\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.887536 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrwsk\" (UniqueName: \"kubernetes.io/projected/797c3455-d508-4258-942c-1fe85abd0bcb-kube-api-access-jrwsk\") pod \"ceilometer-0\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " pod="openstack/ceilometer-0"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.902816 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-n264b"]
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.904264 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.907122 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.907235 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mll4z"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.907368 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.911502 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-n264b"]
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.966484 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zzgs\" (UniqueName: \"kubernetes.io/projected/9ec84446-c040-420f-bcb7-cfb23ec96eb9-kube-api-access-5zzgs\") pod \"nova-cell0-conductor-db-sync-n264b\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.966581 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-config-data\") pod \"nova-cell0-conductor-db-sync-n264b\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.966807 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-scripts\") pod \"nova-cell0-conductor-db-sync-n264b\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:40 crc kubenswrapper[4938]: I1122 10:59:40.966941 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-n264b\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.068453 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zzgs\" (UniqueName: \"kubernetes.io/projected/9ec84446-c040-420f-bcb7-cfb23ec96eb9-kube-api-access-5zzgs\") pod \"nova-cell0-conductor-db-sync-n264b\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.068519 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-config-data\") pod \"nova-cell0-conductor-db-sync-n264b\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.068587 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-scripts\") pod \"nova-cell0-conductor-db-sync-n264b\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.068615 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-n264b\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.073706 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-n264b\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.073780 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-config-data\") pod \"nova-cell0-conductor-db-sync-n264b\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.074107 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-scripts\") pod \"nova-cell0-conductor-db-sync-n264b\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.081426 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.091473 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zzgs\" (UniqueName: \"kubernetes.io/projected/9ec84446-c040-420f-bcb7-cfb23ec96eb9-kube-api-access-5zzgs\") pod \"nova-cell0-conductor-db-sync-n264b\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " pod="openstack/nova-cell0-conductor-db-sync-n264b"
Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.275652 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-n264b" Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.301323 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.301393 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.543883 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.697700 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"797c3455-d508-4258-942c-1fe85abd0bcb","Type":"ContainerStarted","Data":"877d9f70e3c695db61df657aadf1022c235c87853e5b86b3f183d5227c2579a6"} Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.747111 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-n264b"] Nov 22 10:59:41 crc kubenswrapper[4938]: W1122 10:59:41.747673 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ec84446_c040_420f_bcb7_cfb23ec96eb9.slice/crio-c0a4dc78b352c4ace8099e54b75ef4aa958734beee9f6fcfd0afad2ab3afa528 WatchSource:0}: Error finding container c0a4dc78b352c4ace8099e54b75ef4aa958734beee9f6fcfd0afad2ab3afa528: Status 404 returned error can't find the container with id c0a4dc78b352c4ace8099e54b75ef4aa958734beee9f6fcfd0afad2ab3afa528 Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.839922 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.839977 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.876013 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 22 10:59:41 crc kubenswrapper[4938]: I1122 10:59:41.890315 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 22 10:59:42 crc kubenswrapper[4938]: I1122 10:59:42.461458 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="671bac44-f424-43fe-b02d-2b148f0262aa" path="/var/lib/kubelet/pods/671bac44-f424-43fe-b02d-2b148f0262aa/volumes" Nov 22 10:59:42 crc kubenswrapper[4938]: I1122 10:59:42.711555 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-n264b" event={"ID":"9ec84446-c040-420f-bcb7-cfb23ec96eb9","Type":"ContainerStarted","Data":"c0a4dc78b352c4ace8099e54b75ef4aa958734beee9f6fcfd0afad2ab3afa528"} Nov 22 10:59:42 crc kubenswrapper[4938]: I1122 10:59:42.714536 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"797c3455-d508-4258-942c-1fe85abd0bcb","Type":"ContainerStarted","Data":"721a7ca8713e0eac25790794b11fb0ba077d45efa26ddec6458d6794eb239df9"} Nov 22 10:59:42 crc kubenswrapper[4938]: I1122 10:59:42.714795 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 22 10:59:42 crc kubenswrapper[4938]: I1122 10:59:42.714856 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 22 10:59:42 crc kubenswrapper[4938]: I1122 10:59:42.986615 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:42 crc kubenswrapper[4938]: I1122 10:59:42.986861 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:43 crc kubenswrapper[4938]: I1122 10:59:43.035456 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:43 crc kubenswrapper[4938]: I1122 10:59:43.065456 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:43 crc kubenswrapper[4938]: I1122 10:59:43.731496 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"797c3455-d508-4258-942c-1fe85abd0bcb","Type":"ContainerStarted","Data":"e9ca44502bada600f84c79512fe7e3e9a584a5309ce226beaa15211e8a6b4373"} Nov 22 10:59:43 crc kubenswrapper[4938]: I1122 10:59:43.732247 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:43 crc kubenswrapper[4938]: I1122 10:59:43.732274 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:44 crc kubenswrapper[4938]: I1122 10:59:44.746310 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"797c3455-d508-4258-942c-1fe85abd0bcb","Type":"ContainerStarted","Data":"036e5c6978df991b9af76e857b5a0b281796b76f4d8ad2e761a4e856411fa59b"} Nov 22 10:59:44 crc kubenswrapper[4938]: I1122 10:59:44.918382 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 10:59:44 crc kubenswrapper[4938]: I1122 10:59:44.918495 4938 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 10:59:45 crc kubenswrapper[4938]: I1122 10:59:45.038458 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 22 10:59:45 crc kubenswrapper[4938]: I1122 10:59:45.309024 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 10:59:46 crc kubenswrapper[4938]: I1122 10:59:46.181311 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:46 crc kubenswrapper[4938]: I1122 10:59:46.181435 4938 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 10:59:46 crc kubenswrapper[4938]: I1122 10:59:46.188386 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 10:59:52 crc kubenswrapper[4938]: I1122 10:59:52.830562 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-n264b" 
event={"ID":"9ec84446-c040-420f-bcb7-cfb23ec96eb9","Type":"ContainerStarted","Data":"50c5a7157c9ddd89ba8992b92c14bbfa469242b74f0bb1910e95a8299f64e21d"} Nov 22 10:59:52 crc kubenswrapper[4938]: I1122 10:59:52.833360 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"797c3455-d508-4258-942c-1fe85abd0bcb","Type":"ContainerStarted","Data":"a28f9a7d43054481cd014004ec6725abbfc8b745e6826dd9f786c79bdfd63bca"} Nov 22 10:59:52 crc kubenswrapper[4938]: I1122 10:59:52.833532 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 10:59:52 crc kubenswrapper[4938]: I1122 10:59:52.850532 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-n264b" podStartSLOduration=2.889834151 podStartE2EDuration="12.850515726s" podCreationTimestamp="2025-11-22 10:59:40 +0000 UTC" firstStartedPulling="2025-11-22 10:59:41.749835665 +0000 UTC m=+1314.217673064" lastFinishedPulling="2025-11-22 10:59:51.71051724 +0000 UTC m=+1324.178354639" observedRunningTime="2025-11-22 10:59:52.84783654 +0000 UTC m=+1325.315673949" watchObservedRunningTime="2025-11-22 10:59:52.850515726 +0000 UTC m=+1325.318353125" Nov 22 10:59:52 crc kubenswrapper[4938]: I1122 10:59:52.875493 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.719096509 podStartE2EDuration="12.875475715s" podCreationTimestamp="2025-11-22 10:59:40 +0000 UTC" firstStartedPulling="2025-11-22 10:59:41.552712999 +0000 UTC m=+1314.020550398" lastFinishedPulling="2025-11-22 10:59:51.709092205 +0000 UTC m=+1324.176929604" observedRunningTime="2025-11-22 10:59:52.87244772 +0000 UTC m=+1325.340285119" watchObservedRunningTime="2025-11-22 10:59:52.875475715 +0000 UTC m=+1325.343313114" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.143040 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt"] Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.147192 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.149696 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.150434 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.151781 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt"] Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.258928 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/281dba48-4a29-47f7-8228-239a69e7214a-secret-volume\") pod \"collect-profiles-29396820-6dlzt\" (UID: \"281dba48-4a29-47f7-8228-239a69e7214a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.259243 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfvmn\" (UniqueName: \"kubernetes.io/projected/281dba48-4a29-47f7-8228-239a69e7214a-kube-api-access-rfvmn\") pod \"collect-profiles-29396820-6dlzt\" (UID: \"281dba48-4a29-47f7-8228-239a69e7214a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.259675 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/281dba48-4a29-47f7-8228-239a69e7214a-config-volume\") pod \"collect-profiles-29396820-6dlzt\" (UID: \"281dba48-4a29-47f7-8228-239a69e7214a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.361430 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/281dba48-4a29-47f7-8228-239a69e7214a-config-volume\") pod \"collect-profiles-29396820-6dlzt\" (UID: \"281dba48-4a29-47f7-8228-239a69e7214a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.361572 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/281dba48-4a29-47f7-8228-239a69e7214a-secret-volume\") pod \"collect-profiles-29396820-6dlzt\" (UID: \"281dba48-4a29-47f7-8228-239a69e7214a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.361625 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfvmn\" (UniqueName: \"kubernetes.io/projected/281dba48-4a29-47f7-8228-239a69e7214a-kube-api-access-rfvmn\") pod \"collect-profiles-29396820-6dlzt\" (UID: \"281dba48-4a29-47f7-8228-239a69e7214a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.362404 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/281dba48-4a29-47f7-8228-239a69e7214a-config-volume\") pod 
\"collect-profiles-29396820-6dlzt\" (UID: \"281dba48-4a29-47f7-8228-239a69e7214a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.370862 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/281dba48-4a29-47f7-8228-239a69e7214a-secret-volume\") pod \"collect-profiles-29396820-6dlzt\" (UID: \"281dba48-4a29-47f7-8228-239a69e7214a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.379241 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfvmn\" (UniqueName: \"kubernetes.io/projected/281dba48-4a29-47f7-8228-239a69e7214a-kube-api-access-rfvmn\") pod \"collect-profiles-29396820-6dlzt\" (UID: \"281dba48-4a29-47f7-8228-239a69e7214a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.477031 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:00 crc kubenswrapper[4938]: I1122 11:00:00.918396 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt"] Nov 22 11:00:00 crc kubenswrapper[4938]: W1122 11:00:00.921798 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod281dba48_4a29_47f7_8228_239a69e7214a.slice/crio-30d52ec26b8cf82b4f849b6a9eb1bae9f9101fead016df2f2555da33ac95a7b1 WatchSource:0}: Error finding container 30d52ec26b8cf82b4f849b6a9eb1bae9f9101fead016df2f2555da33ac95a7b1: Status 404 returned error can't find the container with id 30d52ec26b8cf82b4f849b6a9eb1bae9f9101fead016df2f2555da33ac95a7b1 Nov 22 11:00:01 crc kubenswrapper[4938]: I1122 11:00:01.921683 4938 generic.go:334] "Generic (PLEG): container finished" podID="281dba48-4a29-47f7-8228-239a69e7214a" containerID="96ba1039e4f3127780369d879e3ff5c718051ff9c18ee651a6fd74d8b8929616" exitCode=0 Nov 22 11:00:01 crc kubenswrapper[4938]: I1122 11:00:01.921760 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" event={"ID":"281dba48-4a29-47f7-8228-239a69e7214a","Type":"ContainerDied","Data":"96ba1039e4f3127780369d879e3ff5c718051ff9c18ee651a6fd74d8b8929616"} Nov 22 11:00:01 crc kubenswrapper[4938]: I1122 11:00:01.922689 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" event={"ID":"281dba48-4a29-47f7-8228-239a69e7214a","Type":"ContainerStarted","Data":"30d52ec26b8cf82b4f849b6a9eb1bae9f9101fead016df2f2555da33ac95a7b1"} Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.300101 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.421861 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/281dba48-4a29-47f7-8228-239a69e7214a-config-volume\") pod \"281dba48-4a29-47f7-8228-239a69e7214a\" (UID: \"281dba48-4a29-47f7-8228-239a69e7214a\") " Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.421938 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfvmn\" (UniqueName: \"kubernetes.io/projected/281dba48-4a29-47f7-8228-239a69e7214a-kube-api-access-rfvmn\") pod \"281dba48-4a29-47f7-8228-239a69e7214a\" (UID: \"281dba48-4a29-47f7-8228-239a69e7214a\") " Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.422001 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/281dba48-4a29-47f7-8228-239a69e7214a-secret-volume\") pod \"281dba48-4a29-47f7-8228-239a69e7214a\" (UID: \"281dba48-4a29-47f7-8228-239a69e7214a\") " Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.422813 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/281dba48-4a29-47f7-8228-239a69e7214a-config-volume" (OuterVolumeSpecName: "config-volume") pod "281dba48-4a29-47f7-8228-239a69e7214a" (UID: "281dba48-4a29-47f7-8228-239a69e7214a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.427952 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/281dba48-4a29-47f7-8228-239a69e7214a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "281dba48-4a29-47f7-8228-239a69e7214a" (UID: "281dba48-4a29-47f7-8228-239a69e7214a"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.428469 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/281dba48-4a29-47f7-8228-239a69e7214a-kube-api-access-rfvmn" (OuterVolumeSpecName: "kube-api-access-rfvmn") pod "281dba48-4a29-47f7-8228-239a69e7214a" (UID: "281dba48-4a29-47f7-8228-239a69e7214a"). InnerVolumeSpecName "kube-api-access-rfvmn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.524535 4938 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/281dba48-4a29-47f7-8228-239a69e7214a-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.524580 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfvmn\" (UniqueName: \"kubernetes.io/projected/281dba48-4a29-47f7-8228-239a69e7214a-kube-api-access-rfvmn\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.524593 4938 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/281dba48-4a29-47f7-8228-239a69e7214a-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.943023 4938 generic.go:334] "Generic (PLEG): container finished" podID="9ec84446-c040-420f-bcb7-cfb23ec96eb9" containerID="50c5a7157c9ddd89ba8992b92c14bbfa469242b74f0bb1910e95a8299f64e21d" exitCode=0 Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.943136 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-n264b" event={"ID":"9ec84446-c040-420f-bcb7-cfb23ec96eb9","Type":"ContainerDied","Data":"50c5a7157c9ddd89ba8992b92c14bbfa469242b74f0bb1910e95a8299f64e21d"} Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.944883 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" event={"ID":"281dba48-4a29-47f7-8228-239a69e7214a","Type":"ContainerDied","Data":"30d52ec26b8cf82b4f849b6a9eb1bae9f9101fead016df2f2555da33ac95a7b1"} Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.944994 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="30d52ec26b8cf82b4f849b6a9eb1bae9f9101fead016df2f2555da33ac95a7b1" Nov 22 11:00:03 crc kubenswrapper[4938]: I1122 11:00:03.944937 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt" Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.277344 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-n264b" Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.360430 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-combined-ca-bundle\") pod \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.360591 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-scripts\") pod \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.360676 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-config-data\") pod \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.360736 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zzgs\" (UniqueName: \"kubernetes.io/projected/9ec84446-c040-420f-bcb7-cfb23ec96eb9-kube-api-access-5zzgs\") pod \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\" (UID: \"9ec84446-c040-420f-bcb7-cfb23ec96eb9\") " Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.369168 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ec84446-c040-420f-bcb7-cfb23ec96eb9-kube-api-access-5zzgs" (OuterVolumeSpecName: "kube-api-access-5zzgs") pod "9ec84446-c040-420f-bcb7-cfb23ec96eb9" (UID: "9ec84446-c040-420f-bcb7-cfb23ec96eb9"). InnerVolumeSpecName "kube-api-access-5zzgs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.372652 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-scripts" (OuterVolumeSpecName: "scripts") pod "9ec84446-c040-420f-bcb7-cfb23ec96eb9" (UID: "9ec84446-c040-420f-bcb7-cfb23ec96eb9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.392977 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9ec84446-c040-420f-bcb7-cfb23ec96eb9" (UID: "9ec84446-c040-420f-bcb7-cfb23ec96eb9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.395627 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-config-data" (OuterVolumeSpecName: "config-data") pod "9ec84446-c040-420f-bcb7-cfb23ec96eb9" (UID: "9ec84446-c040-420f-bcb7-cfb23ec96eb9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.463502 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.463545 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.463556 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zzgs\" (UniqueName: \"kubernetes.io/projected/9ec84446-c040-420f-bcb7-cfb23ec96eb9-kube-api-access-5zzgs\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.463570 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ec84446-c040-420f-bcb7-cfb23ec96eb9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.966192 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-n264b" event={"ID":"9ec84446-c040-420f-bcb7-cfb23ec96eb9","Type":"ContainerDied","Data":"c0a4dc78b352c4ace8099e54b75ef4aa958734beee9f6fcfd0afad2ab3afa528"} Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.966229 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0a4dc78b352c4ace8099e54b75ef4aa958734beee9f6fcfd0afad2ab3afa528" Nov 22 11:00:05 crc kubenswrapper[4938]: I1122 11:00:05.966289 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-n264b" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.047331 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 11:00:06 crc kubenswrapper[4938]: E1122 11:00:06.047938 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="281dba48-4a29-47f7-8228-239a69e7214a" containerName="collect-profiles" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.048013 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="281dba48-4a29-47f7-8228-239a69e7214a" containerName="collect-profiles" Nov 22 11:00:06 crc kubenswrapper[4938]: E1122 11:00:06.048134 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ec84446-c040-420f-bcb7-cfb23ec96eb9" containerName="nova-cell0-conductor-db-sync" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.048208 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ec84446-c040-420f-bcb7-cfb23ec96eb9" containerName="nova-cell0-conductor-db-sync" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.048424 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="281dba48-4a29-47f7-8228-239a69e7214a" containerName="collect-profiles" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.048498 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ec84446-c040-420f-bcb7-cfb23ec96eb9" containerName="nova-cell0-conductor-db-sync" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.049144 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.052056 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mll4z" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.052542 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.064384 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.174129 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59fde2f8-5c0c-44ab-a02b-87115af94dc0-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"59fde2f8-5c0c-44ab-a02b-87115af94dc0\") " pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.174369 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59fde2f8-5c0c-44ab-a02b-87115af94dc0-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"59fde2f8-5c0c-44ab-a02b-87115af94dc0\") " pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.174668 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vpnt\" (UniqueName: \"kubernetes.io/projected/59fde2f8-5c0c-44ab-a02b-87115af94dc0-kube-api-access-7vpnt\") pod \"nova-cell0-conductor-0\" (UID: \"59fde2f8-5c0c-44ab-a02b-87115af94dc0\") " pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.276784 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59fde2f8-5c0c-44ab-a02b-87115af94dc0-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"59fde2f8-5c0c-44ab-a02b-87115af94dc0\") " pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.276838 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59fde2f8-5c0c-44ab-a02b-87115af94dc0-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"59fde2f8-5c0c-44ab-a02b-87115af94dc0\") " pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.276899 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vpnt\" (UniqueName: \"kubernetes.io/projected/59fde2f8-5c0c-44ab-a02b-87115af94dc0-kube-api-access-7vpnt\") pod \"nova-cell0-conductor-0\" (UID: \"59fde2f8-5c0c-44ab-a02b-87115af94dc0\") " pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.282554 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59fde2f8-5c0c-44ab-a02b-87115af94dc0-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"59fde2f8-5c0c-44ab-a02b-87115af94dc0\") " pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.282652 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59fde2f8-5c0c-44ab-a02b-87115af94dc0-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"59fde2f8-5c0c-44ab-a02b-87115af94dc0\") " pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.307544 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vpnt\" (UniqueName: \"kubernetes.io/projected/59fde2f8-5c0c-44ab-a02b-87115af94dc0-kube-api-access-7vpnt\") pod \"nova-cell0-conductor-0\" (UID: \"59fde2f8-5c0c-44ab-a02b-87115af94dc0\") " pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.365717 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.824723 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 11:00:06 crc kubenswrapper[4938]: W1122 11:00:06.831271 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59fde2f8_5c0c_44ab_a02b_87115af94dc0.slice/crio-5cd286643ca38f5ec33558a2a7aaf3da75b2c5c44643d87c165fe4f229b231cb WatchSource:0}: Error finding container 5cd286643ca38f5ec33558a2a7aaf3da75b2c5c44643d87c165fe4f229b231cb: Status 404 returned error can't find the container with id 5cd286643ca38f5ec33558a2a7aaf3da75b2c5c44643d87c165fe4f229b231cb Nov 22 11:00:06 crc kubenswrapper[4938]: I1122 11:00:06.977772 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"59fde2f8-5c0c-44ab-a02b-87115af94dc0","Type":"ContainerStarted","Data":"5cd286643ca38f5ec33558a2a7aaf3da75b2c5c44643d87c165fe4f229b231cb"} Nov 22 11:00:07 crc kubenswrapper[4938]: I1122 11:00:07.988842 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"59fde2f8-5c0c-44ab-a02b-87115af94dc0","Type":"ContainerStarted","Data":"44dffad2721a06ab7b34ead032466598108c2a80747ce8610522522ed1e5bbc5"} Nov 22 11:00:08 crc kubenswrapper[4938]: I1122 11:00:08.004618 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.004591075 podStartE2EDuration="2.004591075s" podCreationTimestamp="2025-11-22 11:00:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:08.001422717 +0000 UTC m=+1340.469260136" watchObservedRunningTime="2025-11-22 11:00:08.004591075 +0000 UTC m=+1340.472428464" Nov 22 11:00:08 crc kubenswrapper[4938]: I1122 11:00:08.997536 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:11 crc kubenswrapper[4938]: I1122 11:00:11.088697 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 22 11:00:11 crc kubenswrapper[4938]: I1122 11:00:11.300853 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:00:11 crc kubenswrapper[4938]: I1122 11:00:11.300974 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:00:16 crc kubenswrapper[4938]: I1122 11:00:16.391445 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.142623 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-5mrph"] Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.144441 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.146749 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.147761 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.179055 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-config-data\") pod \"nova-cell0-cell-mapping-5mrph\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") " pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.179107 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-5mrph\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") " pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.179131 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmk6k\" (UniqueName: \"kubernetes.io/projected/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-kube-api-access-lmk6k\") pod \"nova-cell0-cell-mapping-5mrph\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") " pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.179224 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-scripts\") pod \"nova-cell0-cell-mapping-5mrph\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") " pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.201582 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-5mrph"] Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.280854 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-scripts\") pod \"nova-cell0-cell-mapping-5mrph\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") " pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.280990 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-config-data\") pod \"nova-cell0-cell-mapping-5mrph\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") " pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: 
I1122 11:00:17.281035 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-5mrph\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") " pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.281063 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmk6k\" (UniqueName: \"kubernetes.io/projected/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-kube-api-access-lmk6k\") pod \"nova-cell0-cell-mapping-5mrph\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") " pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.292262 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-5mrph\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") " pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.297558 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-scripts\") pod \"nova-cell0-cell-mapping-5mrph\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") " pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.313320 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmk6k\" (UniqueName: \"kubernetes.io/projected/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-kube-api-access-lmk6k\") pod \"nova-cell0-cell-mapping-5mrph\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") " pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.325751 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-config-data\") pod \"nova-cell0-cell-mapping-5mrph\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") " pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.373762 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.377497 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.381971 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.392069 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.393648 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.397742 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.411530 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.425861 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.465806 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.486011 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ae3f585-65c4-4e6b-82e3-b113549fc229-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0ae3f585-65c4-4e6b-82e3-b113549fc229\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.486189 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ae3f585-65c4-4e6b-82e3-b113549fc229-config-data\") pod \"nova-scheduler-0\" (UID: \"0ae3f585-65c4-4e6b-82e3-b113549fc229\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.486219 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rf4ck\" (UniqueName: \"kubernetes.io/projected/0ae3f585-65c4-4e6b-82e3-b113549fc229-kube-api-access-rf4ck\") pod \"nova-scheduler-0\" (UID: \"0ae3f585-65c4-4e6b-82e3-b113549fc229\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.590179 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ae3f585-65c4-4e6b-82e3-b113549fc229-config-data\") pod \"nova-scheduler-0\" (UID: \"0ae3f585-65c4-4e6b-82e3-b113549fc229\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.590553 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rf4ck\" (UniqueName: \"kubernetes.io/projected/0ae3f585-65c4-4e6b-82e3-b113549fc229-kube-api-access-rf4ck\") pod \"nova-scheduler-0\" (UID: \"0ae3f585-65c4-4e6b-82e3-b113549fc229\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.590621 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ae3f585-65c4-4e6b-82e3-b113549fc229-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0ae3f585-65c4-4e6b-82e3-b113549fc229\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.590667 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5b62b\" (UniqueName: \"kubernetes.io/projected/45d22256-b34b-4614-8f08-97d6ff65511b-kube-api-access-5b62b\") pod \"nova-cell1-novncproxy-0\" (UID: \"45d22256-b34b-4614-8f08-97d6ff65511b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.590712 4938 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45d22256-b34b-4614-8f08-97d6ff65511b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"45d22256-b34b-4614-8f08-97d6ff65511b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.590749 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45d22256-b34b-4614-8f08-97d6ff65511b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"45d22256-b34b-4614-8f08-97d6ff65511b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.595702 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ae3f585-65c4-4e6b-82e3-b113549fc229-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0ae3f585-65c4-4e6b-82e3-b113549fc229\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.609027 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ae3f585-65c4-4e6b-82e3-b113549fc229-config-data\") pod \"nova-scheduler-0\" (UID: \"0ae3f585-65c4-4e6b-82e3-b113549fc229\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.623647 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rf4ck\" (UniqueName: \"kubernetes.io/projected/0ae3f585-65c4-4e6b-82e3-b113549fc229-kube-api-access-rf4ck\") pod \"nova-scheduler-0\" (UID: \"0ae3f585-65c4-4e6b-82e3-b113549fc229\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.694670 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.695750 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5b62b\" (UniqueName: \"kubernetes.io/projected/45d22256-b34b-4614-8f08-97d6ff65511b-kube-api-access-5b62b\") pod \"nova-cell1-novncproxy-0\" (UID: \"45d22256-b34b-4614-8f08-97d6ff65511b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.695821 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45d22256-b34b-4614-8f08-97d6ff65511b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"45d22256-b34b-4614-8f08-97d6ff65511b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.695856 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45d22256-b34b-4614-8f08-97d6ff65511b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"45d22256-b34b-4614-8f08-97d6ff65511b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.701599 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45d22256-b34b-4614-8f08-97d6ff65511b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"45d22256-b34b-4614-8f08-97d6ff65511b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.705285 4938 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45d22256-b34b-4614-8f08-97d6ff65511b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"45d22256-b34b-4614-8f08-97d6ff65511b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.721378 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.749227 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.756730 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5b62b\" (UniqueName: \"kubernetes.io/projected/45d22256-b34b-4614-8f08-97d6ff65511b-kube-api-access-5b62b\") pod \"nova-cell1-novncproxy-0\" (UID: \"45d22256-b34b-4614-8f08-97d6ff65511b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.761555 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.764399 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.806163 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfmqf\" (UniqueName: \"kubernetes.io/projected/84bd3edb-e3da-4697-a1b9-d058b12a5476-kube-api-access-zfmqf\") pod \"nova-api-0\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " pod="openstack/nova-api-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.806218 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84bd3edb-e3da-4697-a1b9-d058b12a5476-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " pod="openstack/nova-api-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.806291 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84bd3edb-e3da-4697-a1b9-d058b12a5476-config-data\") pod \"nova-api-0\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " pod="openstack/nova-api-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.806324 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84bd3edb-e3da-4697-a1b9-d058b12a5476-logs\") pod \"nova-api-0\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " pod="openstack/nova-api-0" Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.819417 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.845394 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.846874 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.853302 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.855482 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.900105 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-hxnp6"]
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.901826 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.907746 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.907817 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.907842 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84bd3edb-e3da-4697-a1b9-d058b12a5476-config-data\") pod \"nova-api-0\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " pod="openstack/nova-api-0"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.907864 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.907889 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84bd3edb-e3da-4697-a1b9-d058b12a5476-logs\") pod \"nova-api-0\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " pod="openstack/nova-api-0"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.907925 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-config\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.907958 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.907978 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-logs\") pod \"nova-metadata-0\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.908009 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfmqf\" (UniqueName: \"kubernetes.io/projected/84bd3edb-e3da-4697-a1b9-d058b12a5476-kube-api-access-zfmqf\") pod \"nova-api-0\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " pod="openstack/nova-api-0"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.908026 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5lhk\" (UniqueName: \"kubernetes.io/projected/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-kube-api-access-g5lhk\") pod \"nova-metadata-0\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.908054 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84bd3edb-e3da-4697-a1b9-d058b12a5476-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " pod="openstack/nova-api-0"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.908075 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-config-data\") pod \"nova-metadata-0\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.908094 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-dns-svc\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.908112 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjkvz\" (UniqueName: \"kubernetes.io/projected/413e9101-7406-46b5-b183-87c6b75b1ac8-kube-api-access-gjkvz\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.909157 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84bd3edb-e3da-4697-a1b9-d058b12a5476-logs\") pod \"nova-api-0\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " pod="openstack/nova-api-0"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.913727 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84bd3edb-e3da-4697-a1b9-d058b12a5476-config-data\") pod \"nova-api-0\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " pod="openstack/nova-api-0"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.914293 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84bd3edb-e3da-4697-a1b9-d058b12a5476-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " pod="openstack/nova-api-0"
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.927687 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-hxnp6"]
Nov 22 11:00:17 crc kubenswrapper[4938]: I1122 11:00:17.928662 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfmqf\" (UniqueName: \"kubernetes.io/projected/84bd3edb-e3da-4697-a1b9-d058b12a5476-kube-api-access-zfmqf\") pod \"nova-api-0\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " pod="openstack/nova-api-0"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.009779 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.012284 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.012357 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.012561 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-config\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.013264 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.013306 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.013446 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.027487 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-config\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.027486 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.027942 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-logs\") pod \"nova-metadata-0\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.028035 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5lhk\" (UniqueName: \"kubernetes.io/projected/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-kube-api-access-g5lhk\") pod \"nova-metadata-0\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.028178 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-config-data\") pod \"nova-metadata-0\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.028218 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-dns-svc\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.028249 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjkvz\" (UniqueName: \"kubernetes.io/projected/413e9101-7406-46b5-b183-87c6b75b1ac8-kube-api-access-gjkvz\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.029274 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-dns-svc\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.030077 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-logs\") pod \"nova-metadata-0\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.030551 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.036330 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-config-data\") pod \"nova-metadata-0\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.042076 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5lhk\" (UniqueName: \"kubernetes.io/projected/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-kube-api-access-g5lhk\") pod \"nova-metadata-0\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.050662 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjkvz\" (UniqueName: \"kubernetes.io/projected/413e9101-7406-46b5-b183-87c6b75b1ac8-kube-api-access-gjkvz\") pod \"dnsmasq-dns-bccf8f775-hxnp6\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.077584 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.169118 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.224366 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.259056 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-dvwrn"]
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.260649 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.269041 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.269562 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-dvwrn"]
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.269578 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.334327 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-config-data\") pod \"nova-cell1-conductor-db-sync-dvwrn\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.334533 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-dvwrn\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.334591 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4j5b\" (UniqueName: \"kubernetes.io/projected/b51f59c0-79bd-43ee-b657-1e2aa209c3af-kube-api-access-k4j5b\") pod \"nova-cell1-conductor-db-sync-dvwrn\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.334859 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-scripts\") pod \"nova-cell1-conductor-db-sync-dvwrn\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.436523 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-dvwrn\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.436622 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4j5b\" (UniqueName: \"kubernetes.io/projected/b51f59c0-79bd-43ee-b657-1e2aa209c3af-kube-api-access-k4j5b\") pod \"nova-cell1-conductor-db-sync-dvwrn\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.436677 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-scripts\") pod \"nova-cell1-conductor-db-sync-dvwrn\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.436725 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-config-data\") pod \"nova-cell1-conductor-db-sync-dvwrn\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.442060 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-scripts\") pod \"nova-cell1-conductor-db-sync-dvwrn\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.442302 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-config-data\") pod \"nova-cell1-conductor-db-sync-dvwrn\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.444030 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-dvwrn\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.463500 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4j5b\" (UniqueName: \"kubernetes.io/projected/b51f59c0-79bd-43ee-b657-1e2aa209c3af-kube-api-access-k4j5b\") pod \"nova-cell1-conductor-db-sync-dvwrn\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:18 crc kubenswrapper[4938]: I1122 11:00:18.579043 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-dvwrn"
Nov 22 11:00:19 crc kubenswrapper[4938]: I1122 11:00:19.451572 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 11:00:19 crc kubenswrapper[4938]: I1122 11:00:19.470304 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-5mrph"]
Nov 22 11:00:19 crc kubenswrapper[4938]: I1122 11:00:19.534236 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 22 11:00:19 crc kubenswrapper[4938]: I1122 11:00:19.643801 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 22 11:00:19 crc kubenswrapper[4938]: I1122 11:00:19.675011 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-hxnp6"]
Nov 22 11:00:19 crc kubenswrapper[4938]: W1122 11:00:19.678379 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod413e9101_7406_46b5_b183_87c6b75b1ac8.slice/crio-fb331b046a876ed9a380749a7a2eb4472543fe9ea23dcf4ba1273915427d14d7 WatchSource:0}: Error finding container fb331b046a876ed9a380749a7a2eb4472543fe9ea23dcf4ba1273915427d14d7: Status 404 returned error can't find the container with id fb331b046a876ed9a380749a7a2eb4472543fe9ea23dcf4ba1273915427d14d7
Nov 22 11:00:19 crc kubenswrapper[4938]: W1122 11:00:19.691449 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb51f59c0_79bd_43ee_b657_1e2aa209c3af.slice/crio-c7b8528b1fa4d172989f64845e2c4f317f4dca256f2407d9f5a84a6db817728f WatchSource:0}: Error finding container c7b8528b1fa4d172989f64845e2c4f317f4dca256f2407d9f5a84a6db817728f: Status 404 returned error can't find the container with id c7b8528b1fa4d172989f64845e2c4f317f4dca256f2407d9f5a84a6db817728f
Nov 22 11:00:19 crc kubenswrapper[4938]: I1122 11:00:19.705187 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-dvwrn"]
Nov 22 11:00:19 crc kubenswrapper[4938]: W1122 11:00:19.706373 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod84bd3edb_e3da_4697_a1b9_d058b12a5476.slice/crio-c3aa1e6d9b6e8dcc018ec6ad276c3bb619a4d9ded17fbfa28c406842ebd09afb WatchSource:0}: Error finding container c3aa1e6d9b6e8dcc018ec6ad276c3bb619a4d9ded17fbfa28c406842ebd09afb: Status 404 returned error can't find the container with id c3aa1e6d9b6e8dcc018ec6ad276c3bb619a4d9ded17fbfa28c406842ebd09afb
Nov 22 11:00:19 crc kubenswrapper[4938]: I1122 11:00:19.720279 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.115650 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-dvwrn" event={"ID":"b51f59c0-79bd-43ee-b657-1e2aa209c3af","Type":"ContainerStarted","Data":"41c3eb3196d36b9546195316bf62b34db3524528d0497fe215cdf5f66211c445"}
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.115731 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-dvwrn" event={"ID":"b51f59c0-79bd-43ee-b657-1e2aa209c3af","Type":"ContainerStarted","Data":"c7b8528b1fa4d172989f64845e2c4f317f4dca256f2407d9f5a84a6db817728f"}
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.116818 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0ae3f585-65c4-4e6b-82e3-b113549fc229","Type":"ContainerStarted","Data":"9649b248dbfaf1601ffed3c1a04f883dc45185a22cba0206275acbf893d26ae7"}
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.118988 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-5mrph" event={"ID":"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e","Type":"ContainerStarted","Data":"9d3ed1a8030d80d9b4c97afdba43174ad2e28b921be9a2d0b4fb46a9e449d213"}
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.119024 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-5mrph" event={"ID":"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e","Type":"ContainerStarted","Data":"e8a4a76db6ecea8cf1c9e909d9ebe3e3cce2d011c82a615111e4f23c3fa76de8"}
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.121239 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"84bd3edb-e3da-4697-a1b9-d058b12a5476","Type":"ContainerStarted","Data":"c3aa1e6d9b6e8dcc018ec6ad276c3bb619a4d9ded17fbfa28c406842ebd09afb"}
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.128043 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8","Type":"ContainerStarted","Data":"83b926d2f386e57bbd7c4fdcf0cd5a778b6ecd7ec617866a2f2ddc58e43dd875"}
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.131722 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"45d22256-b34b-4614-8f08-97d6ff65511b","Type":"ContainerStarted","Data":"4914305487d25b637c774a1fed62ea96c44f7cd81936448cd0472b3b5cfe61e0"}
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.133520 4938 generic.go:334] "Generic (PLEG): container finished" podID="413e9101-7406-46b5-b183-87c6b75b1ac8" containerID="24a565116f55b7267b22cf137885bddc6e1bee8f5397bdb9d797e8a836b2038d" exitCode=0
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.133566 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6" event={"ID":"413e9101-7406-46b5-b183-87c6b75b1ac8","Type":"ContainerDied","Data":"24a565116f55b7267b22cf137885bddc6e1bee8f5397bdb9d797e8a836b2038d"}
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.133594 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6" event={"ID":"413e9101-7406-46b5-b183-87c6b75b1ac8","Type":"ContainerStarted","Data":"fb331b046a876ed9a380749a7a2eb4472543fe9ea23dcf4ba1273915427d14d7"}
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.139130 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-dvwrn" podStartSLOduration=2.139110371 podStartE2EDuration="2.139110371s" podCreationTimestamp="2025-11-22 11:00:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:20.13626666 +0000 UTC m=+1352.604104059" watchObservedRunningTime="2025-11-22 11:00:20.139110371 +0000 UTC m=+1352.606947770"
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.154188 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-5mrph" podStartSLOduration=3.154170474 podStartE2EDuration="3.154170474s" podCreationTimestamp="2025-11-22 11:00:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:20.148988606 +0000 UTC m=+1352.616826025" watchObservedRunningTime="2025-11-22 11:00:20.154170474 +0000 UTC m=+1352.622007873"
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.972198 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 11:00:20 crc kubenswrapper[4938]: I1122 11:00:20.998291 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 22 11:00:21 crc kubenswrapper[4938]: I1122 11:00:21.169754 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6" event={"ID":"413e9101-7406-46b5-b183-87c6b75b1ac8","Type":"ContainerStarted","Data":"788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112"}
Nov 22 11:00:21 crc kubenswrapper[4938]: I1122 11:00:21.191418 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6" podStartSLOduration=4.191402923 podStartE2EDuration="4.191402923s" podCreationTimestamp="2025-11-22 11:00:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:21.189333822 +0000 UTC m=+1353.657171211" watchObservedRunningTime="2025-11-22 11:00:21.191402923 +0000 UTC m=+1353.659240322"
Nov 22 11:00:22 crc kubenswrapper[4938]: I1122 11:00:22.179063 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.199343 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8","Type":"ContainerStarted","Data":"318a38159589ba2a0610797c43ee5f338163a4838f7eef9978043c00e9c9cc96"}
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.199970 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8","Type":"ContainerStarted","Data":"3eda6bf2a6368859fff3c4cd14cf4180ae3f12a3c02e9ef5c9b22451d80f3a1c"}
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.199482 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" containerName="nova-metadata-metadata" containerID="cri-o://318a38159589ba2a0610797c43ee5f338163a4838f7eef9978043c00e9c9cc96" gracePeriod=30
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.199428 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" containerName="nova-metadata-log" containerID="cri-o://3eda6bf2a6368859fff3c4cd14cf4180ae3f12a3c02e9ef5c9b22451d80f3a1c" gracePeriod=30
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.201270 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"45d22256-b34b-4614-8f08-97d6ff65511b","Type":"ContainerStarted","Data":"8aee91714eb7b8683df510e891af0cc2ce4e183b33f8d080061f5cc34a8ec99c"}
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.201418 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="45d22256-b34b-4614-8f08-97d6ff65511b" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://8aee91714eb7b8683df510e891af0cc2ce4e183b33f8d080061f5cc34a8ec99c" gracePeriod=30
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.205949 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0ae3f585-65c4-4e6b-82e3-b113549fc229","Type":"ContainerStarted","Data":"097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26"}
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.210047 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"84bd3edb-e3da-4697-a1b9-d058b12a5476","Type":"ContainerStarted","Data":"52be20538f39c4581072490d5e9ed5b6ab387a4d4c1140225775361f812dcc66"}
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.210091 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"84bd3edb-e3da-4697-a1b9-d058b12a5476","Type":"ContainerStarted","Data":"1af0a7eb871f4535ff940431f1525a57552b15a41974fbd9c2712eabfe4efa91"}
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.234250 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.591038753 podStartE2EDuration="7.234232073s" podCreationTimestamp="2025-11-22 11:00:17 +0000 UTC" firstStartedPulling="2025-11-22 11:00:19.448876894 +0000 UTC m=+1351.916714293" lastFinishedPulling="2025-11-22 11:00:23.092070214 +0000 UTC m=+1355.559907613" observedRunningTime="2025-11-22 11:00:24.225730162 +0000 UTC m=+1356.693567581" watchObservedRunningTime="2025-11-22 11:00:24.234232073 +0000 UTC m=+1356.702069472"
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.276303 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.713304842 podStartE2EDuration="7.276284115s" podCreationTimestamp="2025-11-22 11:00:17 +0000 UTC" firstStartedPulling="2025-11-22 11:00:19.54597138 +0000 UTC m=+1352.013808779" lastFinishedPulling="2025-11-22 11:00:23.108950653 +0000 UTC m=+1355.576788052" observedRunningTime="2025-11-22 11:00:24.262776011 +0000 UTC m=+1356.730613410" watchObservedRunningTime="2025-11-22 11:00:24.276284115 +0000 UTC m=+1356.744121514"
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.280255 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.840613109 podStartE2EDuration="7.280236533s" podCreationTimestamp="2025-11-22 11:00:17 +0000 UTC" firstStartedPulling="2025-11-22 11:00:19.651204609 +0000 UTC m=+1352.119042008" lastFinishedPulling="2025-11-22 11:00:23.090828033 +0000 UTC m=+1355.558665432" observedRunningTime="2025-11-22 11:00:24.279626028 +0000 UTC m=+1356.747463437" watchObservedRunningTime="2025-11-22 11:00:24.280236533 +0000 UTC m=+1356.748073932"
Nov 22 11:00:24 crc kubenswrapper[4938]: I1122 11:00:24.317076 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.91854207 podStartE2EDuration="7.317052546s" podCreationTimestamp="2025-11-22 11:00:17 +0000 UTC" firstStartedPulling="2025-11-22 11:00:19.709241917 +0000 UTC m=+1352.177079316" lastFinishedPulling="2025-11-22 11:00:23.107752393 +0000 UTC m=+1355.575589792" observedRunningTime="2025-11-22 11:00:24.301036269 +0000 UTC m=+1356.768873668" watchObservedRunningTime="2025-11-22 11:00:24.317052546 +0000 UTC m=+1356.784889945"
Nov 22 11:00:25 crc kubenswrapper[4938]: I1122 11:00:25.220861 4938 generic.go:334] "Generic (PLEG): container finished" podID="af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" containerID="318a38159589ba2a0610797c43ee5f338163a4838f7eef9978043c00e9c9cc96" exitCode=0
Nov 22 11:00:25 crc kubenswrapper[4938]: I1122 11:00:25.221187 4938 generic.go:334] "Generic (PLEG): container finished" podID="af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" containerID="3eda6bf2a6368859fff3c4cd14cf4180ae3f12a3c02e9ef5c9b22451d80f3a1c" exitCode=143
Nov 22 11:00:25 crc kubenswrapper[4938]: I1122 11:00:25.221077 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8","Type":"ContainerDied","Data":"318a38159589ba2a0610797c43ee5f338163a4838f7eef9978043c00e9c9cc96"}
Nov 22 11:00:25 crc kubenswrapper[4938]: I1122 11:00:25.222237 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8","Type":"ContainerDied","Data":"3eda6bf2a6368859fff3c4cd14cf4180ae3f12a3c02e9ef5c9b22451d80f3a1c"}
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.040796 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.200799 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-config-data\") pod \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") "
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.200897 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-logs\") pod \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") "
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.200965 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5lhk\" (UniqueName: \"kubernetes.io/projected/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-kube-api-access-g5lhk\") pod \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") "
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.201109 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-combined-ca-bundle\") pod \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\" (UID: \"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8\") "
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.201479 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-logs" (OuterVolumeSpecName: "logs") pod "af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" (UID: "af9b0645-dbdd-40db-94b9-ec41f0c3dbc8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.206108 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-kube-api-access-g5lhk" (OuterVolumeSpecName: "kube-api-access-g5lhk") pod "af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" (UID: "af9b0645-dbdd-40db-94b9-ec41f0c3dbc8"). InnerVolumeSpecName "kube-api-access-g5lhk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.230308 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-config-data" (OuterVolumeSpecName: "config-data") pod "af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" (UID: "af9b0645-dbdd-40db-94b9-ec41f0c3dbc8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.231455 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" (UID: "af9b0645-dbdd-40db-94b9-ec41f0c3dbc8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.232440 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"af9b0645-dbdd-40db-94b9-ec41f0c3dbc8","Type":"ContainerDied","Data":"83b926d2f386e57bbd7c4fdcf0cd5a778b6ecd7ec617866a2f2ddc58e43dd875"}
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.232499 4938 scope.go:117] "RemoveContainer" containerID="318a38159589ba2a0610797c43ee5f338163a4838f7eef9978043c00e9c9cc96"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.232605 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.303218 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.303265 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-logs\") on node \"crc\" DevicePath \"\""
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.303278 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5lhk\" (UniqueName: \"kubernetes.io/projected/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-kube-api-access-g5lhk\") on node \"crc\" DevicePath \"\""
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.303289 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.321096 4938 scope.go:117] "RemoveContainer" containerID="3eda6bf2a6368859fff3c4cd14cf4180ae3f12a3c02e9ef5c9b22451d80f3a1c"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.328251 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.337163 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.356720 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 11:00:26 crc kubenswrapper[4938]: E1122 11:00:26.357281 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" containerName="nova-metadata-log"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.357299 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" containerName="nova-metadata-log"
Nov 22 11:00:26 crc kubenswrapper[4938]: E1122 11:00:26.357328 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" containerName="nova-metadata-metadata"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.357335 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" containerName="nova-metadata-metadata"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.357542 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" containerName="nova-metadata-metadata"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.357569 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" containerName="nova-metadata-log"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.358873 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.363953 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.400524 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.400880 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.461782 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af9b0645-dbdd-40db-94b9-ec41f0c3dbc8" path="/var/lib/kubelet/pods/af9b0645-dbdd-40db-94b9-ec41f0c3dbc8/volumes"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.511278 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/102b406b-1231-4532-9aa3-592aa9572571-logs\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.511322 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.511380 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-config-data\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.511410 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8526s\" (UniqueName: \"kubernetes.io/projected/102b406b-1231-4532-9aa3-592aa9572571-kube-api-access-8526s\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.511684 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.615153 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/102b406b-1231-4532-9aa3-592aa9572571-logs\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.615252 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.615316 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-config-data\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.615382 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8526s\" (UniqueName: \"kubernetes.io/projected/102b406b-1231-4532-9aa3-592aa9572571-kube-api-access-8526s\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.615467 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.615637 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/102b406b-1231-4532-9aa3-592aa9572571-logs\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.619737 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.620328 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-config-data\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.638810 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.642887 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8526s\" (UniqueName: \"kubernetes.io/projected/102b406b-1231-4532-9aa3-592aa9572571-kube-api-access-8526s\") pod \"nova-metadata-0\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " pod="openstack/nova-metadata-0"
Nov 22 11:00:26 crc kubenswrapper[4938]: I1122 11:00:26.726292 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 11:00:27 crc kubenswrapper[4938]: I1122 11:00:27.194011 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 11:00:27 crc kubenswrapper[4938]: W1122 11:00:27.220172 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod102b406b_1231_4532_9aa3_592aa9572571.slice/crio-317a760fb78ddae73d32e34060c8c7a3f9f9733e5633edd4f843e69b7903739b WatchSource:0}: Error finding container 317a760fb78ddae73d32e34060c8c7a3f9f9733e5633edd4f843e69b7903739b: Status 404 returned error can't find the container with id 317a760fb78ddae73d32e34060c8c7a3f9f9733e5633edd4f843e69b7903739b
Nov 22 11:00:27 crc kubenswrapper[4938]: I1122 11:00:27.242418 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"102b406b-1231-4532-9aa3-592aa9572571","Type":"ContainerStarted","Data":"317a760fb78ddae73d32e34060c8c7a3f9f9733e5633edd4f843e69b7903739b"}
Nov 22 11:00:27 crc kubenswrapper[4938]: I1122 11:00:27.762650 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 22 11:00:27 crc kubenswrapper[4938]: I1122 11:00:27.762991 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 22 11:00:27 crc kubenswrapper[4938]: I1122 11:00:27.765645 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 22 11:00:27 crc kubenswrapper[4938]: I1122 11:00:27.789346 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 22 11:00:28 crc kubenswrapper[4938]: I1122 11:00:28.078650 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 22 11:00:28 crc kubenswrapper[4938]: I1122 11:00:28.078713 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 22 11:00:28 crc kubenswrapper[4938]: I1122 11:00:28.227067 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6"
Nov 22 11:00:28 crc kubenswrapper[4938]: I1122 11:00:28.256461 4938 generic.go:334] "Generic (PLEG): container finished" podID="7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e" containerID="9d3ed1a8030d80d9b4c97afdba43174ad2e28b921be9a2d0b4fb46a9e449d213" exitCode=0
Nov 22 11:00:28 crc kubenswrapper[4938]: I1122 11:00:28.256539 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-5mrph" event={"ID":"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e","Type":"ContainerDied","Data":"9d3ed1a8030d80d9b4c97afdba43174ad2e28b921be9a2d0b4fb46a9e449d213"}
Nov 22 11:00:28 crc kubenswrapper[4938]: I1122 11:00:28.263792 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"102b406b-1231-4532-9aa3-592aa9572571","Type":"ContainerStarted","Data":"ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7"}
Nov 22 11:00:28 crc kubenswrapper[4938]: I1122 11:00:28.263838 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"102b406b-1231-4532-9aa3-592aa9572571","Type":"ContainerStarted","Data":"ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd"}
Nov 22 11:00:28 crc kubenswrapper[4938]: I1122 11:00:28.319082 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 22 11:00:28 crc kubenswrapper[4938]: I1122 11:00:28.328656 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-xbj6x"]
Nov 22 11:00:28 crc kubenswrapper[4938]: I1122 11:00:28.329222 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" podUID="eb445e4c-0293-4b98-8adb-0d7b9613b932" containerName="dnsmasq-dns" containerID="cri-o://39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499" gracePeriod=10
Nov 22 11:00:28 crc kubenswrapper[4938]: I1122 11:00:28.332495 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.332472222 podStartE2EDuration="2.332472222s" podCreationTimestamp="2025-11-22 11:00:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:28.302400716 +0000 UTC m=+1360.770238115" watchObservedRunningTime="2025-11-22 11:00:28.332472222 +0000 UTC m=+1360.800309631"
Nov 22 11:00:28 crc kubenswrapper[4938]: E1122 11:00:28.494951 4938 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeb445e4c_0293_4b98_8adb_0d7b9613b932.slice/crio-conmon-39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499.scope\": RecentStats: unable to find data in memory cache]"
Nov 22 11:00:28 crc kubenswrapper[4938]: I1122 11:00:28.975388 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x"
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.080131 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-dns-swift-storage-0\") pod \"eb445e4c-0293-4b98-8adb-0d7b9613b932\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") "
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.080182 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-dns-svc\") pod \"eb445e4c-0293-4b98-8adb-0d7b9613b932\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") "
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.080212 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9zkl\" (UniqueName: \"kubernetes.io/projected/eb445e4c-0293-4b98-8adb-0d7b9613b932-kube-api-access-m9zkl\") pod \"eb445e4c-0293-4b98-8adb-0d7b9613b932\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") "
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.080269 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-ovsdbserver-sb\") pod \"eb445e4c-0293-4b98-8adb-0d7b9613b932\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") "
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.080321 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-ovsdbserver-nb\") pod \"eb445e4c-0293-4b98-8adb-0d7b9613b932\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") "
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.080388 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-config\") pod \"eb445e4c-0293-4b98-8adb-0d7b9613b932\" (UID: \"eb445e4c-0293-4b98-8adb-0d7b9613b932\") "
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.105336 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb445e4c-0293-4b98-8adb-0d7b9613b932-kube-api-access-m9zkl" (OuterVolumeSpecName: "kube-api-access-m9zkl") pod "eb445e4c-0293-4b98-8adb-0d7b9613b932" (UID: "eb445e4c-0293-4b98-8adb-0d7b9613b932"). InnerVolumeSpecName "kube-api-access-m9zkl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.161883 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "eb445e4c-0293-4b98-8adb-0d7b9613b932" (UID: "eb445e4c-0293-4b98-8adb-0d7b9613b932"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.163532 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-config" (OuterVolumeSpecName: "config") pod "eb445e4c-0293-4b98-8adb-0d7b9613b932" (UID: "eb445e4c-0293-4b98-8adb-0d7b9613b932"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.163768 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "eb445e4c-0293-4b98-8adb-0d7b9613b932" (UID: "eb445e4c-0293-4b98-8adb-0d7b9613b932"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.182674 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="84bd3edb-e3da-4697-a1b9-d058b12a5476" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.188:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.183451 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="84bd3edb-e3da-4697-a1b9-d058b12a5476" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.188:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.184804 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "eb445e4c-0293-4b98-8adb-0d7b9613b932" (UID: "eb445e4c-0293-4b98-8adb-0d7b9613b932"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.185291 4938 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.185316 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.185330 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9zkl\" (UniqueName: \"kubernetes.io/projected/eb445e4c-0293-4b98-8adb-0d7b9613b932-kube-api-access-m9zkl\") on node \"crc\" DevicePath \"\""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.185343 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.185354 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-config\") on node \"crc\" DevicePath \"\""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.208136 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "eb445e4c-0293-4b98-8adb-0d7b9613b932" (UID: "eb445e4c-0293-4b98-8adb-0d7b9613b932"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.282635 4938 generic.go:334] "Generic (PLEG): container finished" podID="eb445e4c-0293-4b98-8adb-0d7b9613b932" containerID="39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499" exitCode=0
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.282729 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" event={"ID":"eb445e4c-0293-4b98-8adb-0d7b9613b932","Type":"ContainerDied","Data":"39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499"}
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.282802 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x" event={"ID":"eb445e4c-0293-4b98-8adb-0d7b9613b932","Type":"ContainerDied","Data":"c492513bd5f2404871c8752e9e2acc93316f8ea863f51651247a66eb73567bd4"}
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.282821 4938 scope.go:117] "RemoveContainer" containerID="39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499"
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.283855 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-xbj6x"
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.288110 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eb445e4c-0293-4b98-8adb-0d7b9613b932-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.336798 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-xbj6x"]
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.349382 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-xbj6x"]
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.358099 4938 scope.go:117] "RemoveContainer" containerID="5f6dfdec8282cd2c206959eb4f96f4852f73990c685307b5272bbfdb06231470"
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.410507 4938 scope.go:117] "RemoveContainer" containerID="39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499"
Nov 22 11:00:29 crc kubenswrapper[4938]: E1122 11:00:29.411772 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499\": container with ID starting with 39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499 not found: ID does not exist" containerID="39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499"
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.411812 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499"} err="failed to get container status \"39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499\": rpc error: code = NotFound desc = could not find container \"39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499\": container with ID starting with 39f0c0892b418e338b6d18ec7b791bd8764a8c03402b18e9dc67c955ee01f499 not found: ID does not exist"
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.411842 4938 scope.go:117] "RemoveContainer" containerID="5f6dfdec8282cd2c206959eb4f96f4852f73990c685307b5272bbfdb06231470"
Nov 22 11:00:29 crc kubenswrapper[4938]: E1122 11:00:29.412849 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f6dfdec8282cd2c206959eb4f96f4852f73990c685307b5272bbfdb06231470\": container with ID starting with 5f6dfdec8282cd2c206959eb4f96f4852f73990c685307b5272bbfdb06231470 not found: ID does not exist" containerID="5f6dfdec8282cd2c206959eb4f96f4852f73990c685307b5272bbfdb06231470"
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.412880 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f6dfdec8282cd2c206959eb4f96f4852f73990c685307b5272bbfdb06231470"} err="failed to get container status \"5f6dfdec8282cd2c206959eb4f96f4852f73990c685307b5272bbfdb06231470\": rpc error: code = NotFound desc = could not find container \"5f6dfdec8282cd2c206959eb4f96f4852f73990c685307b5272bbfdb06231470\": container with ID starting with 5f6dfdec8282cd2c206959eb4f96f4852f73990c685307b5272bbfdb06231470 not found: ID does not exist"
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.736669 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-5mrph"
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.912638 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-scripts\") pod \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") "
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.912719 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmk6k\" (UniqueName: \"kubernetes.io/projected/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-kube-api-access-lmk6k\") pod \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") "
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.912768 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-combined-ca-bundle\") pod \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") "
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.912800 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-config-data\") pod \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\" (UID: \"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e\") "
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.917651 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-kube-api-access-lmk6k" (OuterVolumeSpecName: "kube-api-access-lmk6k") pod "7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e" (UID: "7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e"). InnerVolumeSpecName "kube-api-access-lmk6k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.919273 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-scripts" (OuterVolumeSpecName: "scripts") pod "7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e" (UID: "7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e"). InnerVolumeSpecName "scripts".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.951660 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e" (UID: "7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:29 crc kubenswrapper[4938]: I1122 11:00:29.967658 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-config-data" (OuterVolumeSpecName: "config-data") pod "7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e" (UID: "7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.014842 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.014886 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmk6k\" (UniqueName: \"kubernetes.io/projected/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-kube-api-access-lmk6k\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.014902 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.014930 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.293387 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-5mrph" event={"ID":"7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e","Type":"ContainerDied","Data":"e8a4a76db6ecea8cf1c9e909d9ebe3e3cce2d011c82a615111e4f23c3fa76de8"} Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.293699 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e8a4a76db6ecea8cf1c9e909d9ebe3e3cce2d011c82a615111e4f23c3fa76de8" Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.293771 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-5mrph" Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.459614 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb445e4c-0293-4b98-8adb-0d7b9613b932" path="/var/lib/kubelet/pods/eb445e4c-0293-4b98-8adb-0d7b9613b932/volumes" Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.476369 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.476619 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="84bd3edb-e3da-4697-a1b9-d058b12a5476" containerName="nova-api-log" containerID="cri-o://1af0a7eb871f4535ff940431f1525a57552b15a41974fbd9c2712eabfe4efa91" gracePeriod=30 Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.476838 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="84bd3edb-e3da-4697-a1b9-d058b12a5476" containerName="nova-api-api" containerID="cri-o://52be20538f39c4581072490d5e9ed5b6ab387a4d4c1140225775361f812dcc66" gracePeriod=30 Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.490973 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.491206 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="0ae3f585-65c4-4e6b-82e3-b113549fc229" containerName="nova-scheduler-scheduler" containerID="cri-o://097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26" gracePeriod=30 Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.549606 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.549879 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="102b406b-1231-4532-9aa3-592aa9572571" containerName="nova-metadata-log" containerID="cri-o://ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd" gracePeriod=30 Nov 22 11:00:30 crc kubenswrapper[4938]: I1122 11:00:30.549985 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="102b406b-1231-4532-9aa3-592aa9572571" containerName="nova-metadata-metadata" containerID="cri-o://ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7" gracePeriod=30 Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.011808 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.037581 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-combined-ca-bundle\") pod \"102b406b-1231-4532-9aa3-592aa9572571\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.037731 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-nova-metadata-tls-certs\") pod \"102b406b-1231-4532-9aa3-592aa9572571\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.038162 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-config-data\") pod \"102b406b-1231-4532-9aa3-592aa9572571\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.038296 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8526s\" (UniqueName: \"kubernetes.io/projected/102b406b-1231-4532-9aa3-592aa9572571-kube-api-access-8526s\") pod \"102b406b-1231-4532-9aa3-592aa9572571\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.038368 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/102b406b-1231-4532-9aa3-592aa9572571-logs\") pod \"102b406b-1231-4532-9aa3-592aa9572571\" (UID: \"102b406b-1231-4532-9aa3-592aa9572571\") " Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.038706 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/102b406b-1231-4532-9aa3-592aa9572571-logs" (OuterVolumeSpecName: "logs") pod "102b406b-1231-4532-9aa3-592aa9572571" (UID: "102b406b-1231-4532-9aa3-592aa9572571"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.038977 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/102b406b-1231-4532-9aa3-592aa9572571-logs\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.044546 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/102b406b-1231-4532-9aa3-592aa9572571-kube-api-access-8526s" (OuterVolumeSpecName: "kube-api-access-8526s") pod "102b406b-1231-4532-9aa3-592aa9572571" (UID: "102b406b-1231-4532-9aa3-592aa9572571"). InnerVolumeSpecName "kube-api-access-8526s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.075171 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "102b406b-1231-4532-9aa3-592aa9572571" (UID: "102b406b-1231-4532-9aa3-592aa9572571"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.081061 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-config-data" (OuterVolumeSpecName: "config-data") pod "102b406b-1231-4532-9aa3-592aa9572571" (UID: "102b406b-1231-4532-9aa3-592aa9572571"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.111297 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "102b406b-1231-4532-9aa3-592aa9572571" (UID: "102b406b-1231-4532-9aa3-592aa9572571"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.140595 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8526s\" (UniqueName: \"kubernetes.io/projected/102b406b-1231-4532-9aa3-592aa9572571-kube-api-access-8526s\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.140641 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.140651 4938 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.140660 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/102b406b-1231-4532-9aa3-592aa9572571-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.315744 4938 generic.go:334] "Generic (PLEG): container finished" podID="84bd3edb-e3da-4697-a1b9-d058b12a5476" containerID="1af0a7eb871f4535ff940431f1525a57552b15a41974fbd9c2712eabfe4efa91" exitCode=143 Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.315869 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"84bd3edb-e3da-4697-a1b9-d058b12a5476","Type":"ContainerDied","Data":"1af0a7eb871f4535ff940431f1525a57552b15a41974fbd9c2712eabfe4efa91"} Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.319928 4938 generic.go:334] "Generic (PLEG): container finished" podID="102b406b-1231-4532-9aa3-592aa9572571" containerID="ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7" exitCode=0 Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.319962 4938 generic.go:334] "Generic (PLEG): container finished" podID="102b406b-1231-4532-9aa3-592aa9572571" containerID="ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd" exitCode=143 Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.319985 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"102b406b-1231-4532-9aa3-592aa9572571","Type":"ContainerDied","Data":"ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7"} Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.320018 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"102b406b-1231-4532-9aa3-592aa9572571","Type":"ContainerDied","Data":"ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd"} Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.320033 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"102b406b-1231-4532-9aa3-592aa9572571","Type":"ContainerDied","Data":"317a760fb78ddae73d32e34060c8c7a3f9f9733e5633edd4f843e69b7903739b"} Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.320052 4938 scope.go:117] "RemoveContainer" containerID="ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.320198 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.366143 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.382066 4938 scope.go:117] "RemoveContainer" containerID="ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.387259 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.399989 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:31 crc kubenswrapper[4938]: E1122 11:00:31.400570 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb445e4c-0293-4b98-8adb-0d7b9613b932" containerName="init" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.400592 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb445e4c-0293-4b98-8adb-0d7b9613b932" containerName="init" Nov 22 11:00:31 crc kubenswrapper[4938]: E1122 11:00:31.400607 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e" containerName="nova-manage" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.400615 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e" containerName="nova-manage" Nov 22 11:00:31 crc kubenswrapper[4938]: E1122 11:00:31.400642 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="102b406b-1231-4532-9aa3-592aa9572571" containerName="nova-metadata-metadata" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.400652 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="102b406b-1231-4532-9aa3-592aa9572571" containerName="nova-metadata-metadata" Nov 22 11:00:31 crc kubenswrapper[4938]: E1122 11:00:31.400666 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="102b406b-1231-4532-9aa3-592aa9572571" containerName="nova-metadata-log" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.400676 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="102b406b-1231-4532-9aa3-592aa9572571" containerName="nova-metadata-log" Nov 22 11:00:31 crc kubenswrapper[4938]: E1122 11:00:31.400718 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb445e4c-0293-4b98-8adb-0d7b9613b932" containerName="dnsmasq-dns" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.400728 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb445e4c-0293-4b98-8adb-0d7b9613b932" containerName="dnsmasq-dns" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.400991 4938 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e" containerName="nova-manage" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.401014 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="102b406b-1231-4532-9aa3-592aa9572571" containerName="nova-metadata-log" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.401041 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb445e4c-0293-4b98-8adb-0d7b9613b932" containerName="dnsmasq-dns" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.401057 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="102b406b-1231-4532-9aa3-592aa9572571" containerName="nova-metadata-metadata" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.402421 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.420284 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.420429 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.426434 4938 scope.go:117] "RemoveContainer" containerID="ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7" Nov 22 11:00:31 crc kubenswrapper[4938]: E1122 11:00:31.427053 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7\": container with ID starting with ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7 not found: ID does not exist" containerID="ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.427099 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7"} err="failed to get container status \"ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7\": rpc error: code = NotFound desc = could not find container \"ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7\": container with ID starting with ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7 not found: ID does not exist" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.427127 4938 scope.go:117] "RemoveContainer" containerID="ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd" Nov 22 11:00:31 crc kubenswrapper[4938]: E1122 11:00:31.428155 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd\": container with ID starting with ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd not found: ID does not exist" containerID="ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.428189 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd"} err="failed to get container status \"ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd\": rpc error: code = NotFound desc = could not find container \"ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd\": 
container with ID starting with ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd not found: ID does not exist" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.428212 4938 scope.go:117] "RemoveContainer" containerID="ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.428460 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7"} err="failed to get container status \"ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7\": rpc error: code = NotFound desc = could not find container \"ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7\": container with ID starting with ae78e94a993675ca0cdc50656216515a8991c050e07194f6e325da0ab56530a7 not found: ID does not exist" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.428487 4938 scope.go:117] "RemoveContainer" containerID="ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.428696 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd"} err="failed to get container status \"ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd\": rpc error: code = NotFound desc = could not find container \"ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd\": container with ID starting with ed0d6e305246e141d6c831aede7745314a453773eef1055e07f4bbd25be246dd not found: ID does not exist" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.444966 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/768c8171-f2ed-498d-bba5-debf776d1a25-logs\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.445082 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-config-data\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.445109 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.445130 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm4jz\" (UniqueName: \"kubernetes.io/projected/768c8171-f2ed-498d-bba5-debf776d1a25-kube-api-access-sm4jz\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.445279 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " 
pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.448416 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.546990 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/768c8171-f2ed-498d-bba5-debf776d1a25-logs\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.547351 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.547457 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-config-data\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.547591 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm4jz\" (UniqueName: \"kubernetes.io/projected/768c8171-f2ed-498d-bba5-debf776d1a25-kube-api-access-sm4jz\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.547814 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.549314 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/768c8171-f2ed-498d-bba5-debf776d1a25-logs\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.555693 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.558448 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-config-data\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.559062 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.578333 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-sm4jz\" (UniqueName: \"kubernetes.io/projected/768c8171-f2ed-498d-bba5-debf776d1a25-kube-api-access-sm4jz\") pod \"nova-metadata-0\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " pod="openstack/nova-metadata-0" Nov 22 11:00:31 crc kubenswrapper[4938]: I1122 11:00:31.742062 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 11:00:32 crc kubenswrapper[4938]: I1122 11:00:32.194372 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:32 crc kubenswrapper[4938]: W1122 11:00:32.196563 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod768c8171_f2ed_498d_bba5_debf776d1a25.slice/crio-c3cf71930dfc481b9b3a5c67d4db6c4df7e08102bc87f48c01ad874f00bd6e79 WatchSource:0}: Error finding container c3cf71930dfc481b9b3a5c67d4db6c4df7e08102bc87f48c01ad874f00bd6e79: Status 404 returned error can't find the container with id c3cf71930dfc481b9b3a5c67d4db6c4df7e08102bc87f48c01ad874f00bd6e79 Nov 22 11:00:32 crc kubenswrapper[4938]: I1122 11:00:32.330394 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"768c8171-f2ed-498d-bba5-debf776d1a25","Type":"ContainerStarted","Data":"c3cf71930dfc481b9b3a5c67d4db6c4df7e08102bc87f48c01ad874f00bd6e79"} Nov 22 11:00:32 crc kubenswrapper[4938]: I1122 11:00:32.331961 4938 generic.go:334] "Generic (PLEG): container finished" podID="b51f59c0-79bd-43ee-b657-1e2aa209c3af" containerID="41c3eb3196d36b9546195316bf62b34db3524528d0497fe215cdf5f66211c445" exitCode=0 Nov 22 11:00:32 crc kubenswrapper[4938]: I1122 11:00:32.332031 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-dvwrn" event={"ID":"b51f59c0-79bd-43ee-b657-1e2aa209c3af","Type":"ContainerDied","Data":"41c3eb3196d36b9546195316bf62b34db3524528d0497fe215cdf5f66211c445"} Nov 22 11:00:32 crc kubenswrapper[4938]: I1122 11:00:32.457926 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="102b406b-1231-4532-9aa3-592aa9572571" path="/var/lib/kubelet/pods/102b406b-1231-4532-9aa3-592aa9572571/volumes" Nov 22 11:00:32 crc kubenswrapper[4938]: E1122 11:00:32.765093 4938 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 11:00:32 crc kubenswrapper[4938]: E1122 11:00:32.767508 4938 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 11:00:32 crc kubenswrapper[4938]: E1122 11:00:32.769135 4938 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 11:00:32 crc kubenswrapper[4938]: E1122 11:00:32.769190 4938 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: 
cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="0ae3f585-65c4-4e6b-82e3-b113549fc229" containerName="nova-scheduler-scheduler" Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.348341 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"768c8171-f2ed-498d-bba5-debf776d1a25","Type":"ContainerStarted","Data":"d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870"} Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.348392 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"768c8171-f2ed-498d-bba5-debf776d1a25","Type":"ContainerStarted","Data":"b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad"} Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.369813 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.369797227 podStartE2EDuration="2.369797227s" podCreationTimestamp="2025-11-22 11:00:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:33.366855605 +0000 UTC m=+1365.834693004" watchObservedRunningTime="2025-11-22 11:00:33.369797227 +0000 UTC m=+1365.837634626" Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.698762 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-dvwrn" Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.797449 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-combined-ca-bundle\") pod \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.797502 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4j5b\" (UniqueName: \"kubernetes.io/projected/b51f59c0-79bd-43ee-b657-1e2aa209c3af-kube-api-access-k4j5b\") pod \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.797611 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-scripts\") pod \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.797688 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-config-data\") pod \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\" (UID: \"b51f59c0-79bd-43ee-b657-1e2aa209c3af\") " Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.802856 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-scripts" (OuterVolumeSpecName: "scripts") pod "b51f59c0-79bd-43ee-b657-1e2aa209c3af" (UID: "b51f59c0-79bd-43ee-b657-1e2aa209c3af"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.803932 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b51f59c0-79bd-43ee-b657-1e2aa209c3af-kube-api-access-k4j5b" (OuterVolumeSpecName: "kube-api-access-k4j5b") pod "b51f59c0-79bd-43ee-b657-1e2aa209c3af" (UID: "b51f59c0-79bd-43ee-b657-1e2aa209c3af"). InnerVolumeSpecName "kube-api-access-k4j5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.824351 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b51f59c0-79bd-43ee-b657-1e2aa209c3af" (UID: "b51f59c0-79bd-43ee-b657-1e2aa209c3af"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.827844 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-config-data" (OuterVolumeSpecName: "config-data") pod "b51f59c0-79bd-43ee-b657-1e2aa209c3af" (UID: "b51f59c0-79bd-43ee-b657-1e2aa209c3af"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.900607 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.900648 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.900659 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4j5b\" (UniqueName: \"kubernetes.io/projected/b51f59c0-79bd-43ee-b657-1e2aa209c3af-kube-api-access-k4j5b\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:33 crc kubenswrapper[4938]: I1122 11:00:33.900668 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b51f59c0-79bd-43ee-b657-1e2aa209c3af-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.370345 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-dvwrn" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.370408 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-dvwrn" event={"ID":"b51f59c0-79bd-43ee-b657-1e2aa209c3af","Type":"ContainerDied","Data":"c7b8528b1fa4d172989f64845e2c4f317f4dca256f2407d9f5a84a6db817728f"} Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.370452 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c7b8528b1fa4d172989f64845e2c4f317f4dca256f2407d9f5a84a6db817728f" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.440772 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 22 11:00:34 crc kubenswrapper[4938]: E1122 11:00:34.441520 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b51f59c0-79bd-43ee-b657-1e2aa209c3af" containerName="nova-cell1-conductor-db-sync" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.441535 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="b51f59c0-79bd-43ee-b657-1e2aa209c3af" containerName="nova-cell1-conductor-db-sync" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.441758 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="b51f59c0-79bd-43ee-b657-1e2aa209c3af" containerName="nova-cell1-conductor-db-sync" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.442428 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.445432 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.462054 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.512129 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb58a07-3ce8-43b4-a75f-a883df1d1e02-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6eb58a07-3ce8-43b4-a75f-a883df1d1e02\") " pod="openstack/nova-cell1-conductor-0" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.512361 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xbkz\" (UniqueName: \"kubernetes.io/projected/6eb58a07-3ce8-43b4-a75f-a883df1d1e02-kube-api-access-6xbkz\") pod \"nova-cell1-conductor-0\" (UID: \"6eb58a07-3ce8-43b4-a75f-a883df1d1e02\") " pod="openstack/nova-cell1-conductor-0" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.512417 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb58a07-3ce8-43b4-a75f-a883df1d1e02-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6eb58a07-3ce8-43b4-a75f-a883df1d1e02\") " pod="openstack/nova-cell1-conductor-0" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.614659 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xbkz\" (UniqueName: \"kubernetes.io/projected/6eb58a07-3ce8-43b4-a75f-a883df1d1e02-kube-api-access-6xbkz\") pod \"nova-cell1-conductor-0\" (UID: \"6eb58a07-3ce8-43b4-a75f-a883df1d1e02\") " pod="openstack/nova-cell1-conductor-0" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 
11:00:34.614730 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb58a07-3ce8-43b4-a75f-a883df1d1e02-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6eb58a07-3ce8-43b4-a75f-a883df1d1e02\") " pod="openstack/nova-cell1-conductor-0" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.614784 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb58a07-3ce8-43b4-a75f-a883df1d1e02-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6eb58a07-3ce8-43b4-a75f-a883df1d1e02\") " pod="openstack/nova-cell1-conductor-0" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.618536 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb58a07-3ce8-43b4-a75f-a883df1d1e02-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6eb58a07-3ce8-43b4-a75f-a883df1d1e02\") " pod="openstack/nova-cell1-conductor-0" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.618714 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb58a07-3ce8-43b4-a75f-a883df1d1e02-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6eb58a07-3ce8-43b4-a75f-a883df1d1e02\") " pod="openstack/nova-cell1-conductor-0" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.631318 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xbkz\" (UniqueName: \"kubernetes.io/projected/6eb58a07-3ce8-43b4-a75f-a883df1d1e02-kube-api-access-6xbkz\") pod \"nova-cell1-conductor-0\" (UID: \"6eb58a07-3ce8-43b4-a75f-a883df1d1e02\") " pod="openstack/nova-cell1-conductor-0" Nov 22 11:00:34 crc kubenswrapper[4938]: I1122 11:00:34.766355 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.251942 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.351741 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.393067 4938 generic.go:334] "Generic (PLEG): container finished" podID="0ae3f585-65c4-4e6b-82e3-b113549fc229" containerID="097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26" exitCode=0 Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.393136 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0ae3f585-65c4-4e6b-82e3-b113549fc229","Type":"ContainerDied","Data":"097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26"} Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.393548 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0ae3f585-65c4-4e6b-82e3-b113549fc229","Type":"ContainerDied","Data":"9649b248dbfaf1601ffed3c1a04f883dc45185a22cba0206275acbf893d26ae7"} Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.393581 4938 scope.go:117] "RemoveContainer" containerID="097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.393152 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.404047 4938 generic.go:334] "Generic (PLEG): container finished" podID="84bd3edb-e3da-4697-a1b9-d058b12a5476" containerID="52be20538f39c4581072490d5e9ed5b6ab387a4d4c1140225775361f812dcc66" exitCode=0 Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.404123 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"84bd3edb-e3da-4697-a1b9-d058b12a5476","Type":"ContainerDied","Data":"52be20538f39c4581072490d5e9ed5b6ab387a4d4c1140225775361f812dcc66"} Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.407249 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6eb58a07-3ce8-43b4-a75f-a883df1d1e02","Type":"ContainerStarted","Data":"df9967b515a773d172c22cad65151695d170c4ff6ff0d197d05fd193333417f4"} Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.417384 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.430587 4938 scope.go:117] "RemoveContainer" containerID="097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26" Nov 22 11:00:35 crc kubenswrapper[4938]: E1122 11:00:35.431071 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26\": container with ID starting with 097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26 not found: ID does not exist" containerID="097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.431115 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26"} err="failed to get container status \"097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26\": rpc error: code = NotFound desc = could not find container \"097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26\": container with ID starting with 097b21a26cce52c211adf4bfd87f6cd4a41264561965f169925c36b6a1dd7e26 not found: ID does not exist" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.447934 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rf4ck\" (UniqueName: \"kubernetes.io/projected/0ae3f585-65c4-4e6b-82e3-b113549fc229-kube-api-access-rf4ck\") pod \"0ae3f585-65c4-4e6b-82e3-b113549fc229\" (UID: \"0ae3f585-65c4-4e6b-82e3-b113549fc229\") " Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.448063 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ae3f585-65c4-4e6b-82e3-b113549fc229-config-data\") pod \"0ae3f585-65c4-4e6b-82e3-b113549fc229\" (UID: \"0ae3f585-65c4-4e6b-82e3-b113549fc229\") " Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.448091 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ae3f585-65c4-4e6b-82e3-b113549fc229-combined-ca-bundle\") pod \"0ae3f585-65c4-4e6b-82e3-b113549fc229\" (UID: \"0ae3f585-65c4-4e6b-82e3-b113549fc229\") " Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.458567 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/0ae3f585-65c4-4e6b-82e3-b113549fc229-kube-api-access-rf4ck" (OuterVolumeSpecName: "kube-api-access-rf4ck") pod "0ae3f585-65c4-4e6b-82e3-b113549fc229" (UID: "0ae3f585-65c4-4e6b-82e3-b113549fc229"). InnerVolumeSpecName "kube-api-access-rf4ck". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.478642 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ae3f585-65c4-4e6b-82e3-b113549fc229-config-data" (OuterVolumeSpecName: "config-data") pod "0ae3f585-65c4-4e6b-82e3-b113549fc229" (UID: "0ae3f585-65c4-4e6b-82e3-b113549fc229"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.481955 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ae3f585-65c4-4e6b-82e3-b113549fc229-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0ae3f585-65c4-4e6b-82e3-b113549fc229" (UID: "0ae3f585-65c4-4e6b-82e3-b113549fc229"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.549744 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84bd3edb-e3da-4697-a1b9-d058b12a5476-logs\") pod \"84bd3edb-e3da-4697-a1b9-d058b12a5476\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.549817 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84bd3edb-e3da-4697-a1b9-d058b12a5476-config-data\") pod \"84bd3edb-e3da-4697-a1b9-d058b12a5476\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.549992 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84bd3edb-e3da-4697-a1b9-d058b12a5476-combined-ca-bundle\") pod \"84bd3edb-e3da-4697-a1b9-d058b12a5476\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.550134 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfmqf\" (UniqueName: \"kubernetes.io/projected/84bd3edb-e3da-4697-a1b9-d058b12a5476-kube-api-access-zfmqf\") pod \"84bd3edb-e3da-4697-a1b9-d058b12a5476\" (UID: \"84bd3edb-e3da-4697-a1b9-d058b12a5476\") " Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.550855 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ae3f585-65c4-4e6b-82e3-b113549fc229-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.550882 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ae3f585-65c4-4e6b-82e3-b113549fc229-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.550894 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rf4ck\" (UniqueName: \"kubernetes.io/projected/0ae3f585-65c4-4e6b-82e3-b113549fc229-kube-api-access-rf4ck\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.551474 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/empty-dir/84bd3edb-e3da-4697-a1b9-d058b12a5476-logs" (OuterVolumeSpecName: "logs") pod "84bd3edb-e3da-4697-a1b9-d058b12a5476" (UID: "84bd3edb-e3da-4697-a1b9-d058b12a5476"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.555522 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84bd3edb-e3da-4697-a1b9-d058b12a5476-kube-api-access-zfmqf" (OuterVolumeSpecName: "kube-api-access-zfmqf") pod "84bd3edb-e3da-4697-a1b9-d058b12a5476" (UID: "84bd3edb-e3da-4697-a1b9-d058b12a5476"). InnerVolumeSpecName "kube-api-access-zfmqf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.575552 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84bd3edb-e3da-4697-a1b9-d058b12a5476-config-data" (OuterVolumeSpecName: "config-data") pod "84bd3edb-e3da-4697-a1b9-d058b12a5476" (UID: "84bd3edb-e3da-4697-a1b9-d058b12a5476"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.578503 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84bd3edb-e3da-4697-a1b9-d058b12a5476-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "84bd3edb-e3da-4697-a1b9-d058b12a5476" (UID: "84bd3edb-e3da-4697-a1b9-d058b12a5476"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.652830 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfmqf\" (UniqueName: \"kubernetes.io/projected/84bd3edb-e3da-4697-a1b9-d058b12a5476-kube-api-access-zfmqf\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.652887 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84bd3edb-e3da-4697-a1b9-d058b12a5476-logs\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.652901 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84bd3edb-e3da-4697-a1b9-d058b12a5476-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.652933 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84bd3edb-e3da-4697-a1b9-d058b12a5476-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.730638 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.740121 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.747897 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:35 crc kubenswrapper[4938]: E1122 11:00:35.748290 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84bd3edb-e3da-4697-a1b9-d058b12a5476" containerName="nova-api-log" Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.748308 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="84bd3edb-e3da-4697-a1b9-d058b12a5476" containerName="nova-api-log" Nov 22 11:00:35 crc 
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.748324 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ae3f585-65c4-4e6b-82e3-b113549fc229" containerName="nova-scheduler-scheduler"
Nov 22 11:00:35 crc kubenswrapper[4938]: E1122 11:00:35.748358 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84bd3edb-e3da-4697-a1b9-d058b12a5476" containerName="nova-api-api"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.748367 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="84bd3edb-e3da-4697-a1b9-d058b12a5476" containerName="nova-api-api"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.748528 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="84bd3edb-e3da-4697-a1b9-d058b12a5476" containerName="nova-api-log"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.748538 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="84bd3edb-e3da-4697-a1b9-d058b12a5476" containerName="nova-api-api"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.748550 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ae3f585-65c4-4e6b-82e3-b113549fc229" containerName="nova-scheduler-scheduler"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.749175 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.750810 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.760369 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.856439 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/704acafb-fb16-45ae-b763-fabf037c4a90-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"704acafb-fb16-45ae-b763-fabf037c4a90\") " pod="openstack/nova-scheduler-0"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.856604 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/704acafb-fb16-45ae-b763-fabf037c4a90-config-data\") pod \"nova-scheduler-0\" (UID: \"704acafb-fb16-45ae-b763-fabf037c4a90\") " pod="openstack/nova-scheduler-0"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.856654 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2tdl\" (UniqueName: \"kubernetes.io/projected/704acafb-fb16-45ae-b763-fabf037c4a90-kube-api-access-x2tdl\") pod \"nova-scheduler-0\" (UID: \"704acafb-fb16-45ae-b763-fabf037c4a90\") " pod="openstack/nova-scheduler-0"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.958063 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/704acafb-fb16-45ae-b763-fabf037c4a90-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"704acafb-fb16-45ae-b763-fabf037c4a90\") " pod="openstack/nova-scheduler-0"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.958272 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/704acafb-fb16-45ae-b763-fabf037c4a90-config-data\") pod \"nova-scheduler-0\" (UID: \"704acafb-fb16-45ae-b763-fabf037c4a90\") " pod="openstack/nova-scheduler-0"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.958543 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2tdl\" (UniqueName: \"kubernetes.io/projected/704acafb-fb16-45ae-b763-fabf037c4a90-kube-api-access-x2tdl\") pod \"nova-scheduler-0\" (UID: \"704acafb-fb16-45ae-b763-fabf037c4a90\") " pod="openstack/nova-scheduler-0"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.964614 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/704acafb-fb16-45ae-b763-fabf037c4a90-config-data\") pod \"nova-scheduler-0\" (UID: \"704acafb-fb16-45ae-b763-fabf037c4a90\") " pod="openstack/nova-scheduler-0"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.968407 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/704acafb-fb16-45ae-b763-fabf037c4a90-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"704acafb-fb16-45ae-b763-fabf037c4a90\") " pod="openstack/nova-scheduler-0"
Nov 22 11:00:35 crc kubenswrapper[4938]: I1122 11:00:35.985344 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2tdl\" (UniqueName: \"kubernetes.io/projected/704acafb-fb16-45ae-b763-fabf037c4a90-kube-api-access-x2tdl\") pod \"nova-scheduler-0\" (UID: \"704acafb-fb16-45ae-b763-fabf037c4a90\") " pod="openstack/nova-scheduler-0"
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.071269 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.425824 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"84bd3edb-e3da-4697-a1b9-d058b12a5476","Type":"ContainerDied","Data":"c3aa1e6d9b6e8dcc018ec6ad276c3bb619a4d9ded17fbfa28c406842ebd09afb"}
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.426250 4938 scope.go:117] "RemoveContainer" containerID="52be20538f39c4581072490d5e9ed5b6ab387a4d4c1140225775361f812dcc66"
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.425880 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.431025 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6eb58a07-3ce8-43b4-a75f-a883df1d1e02","Type":"ContainerStarted","Data":"c4fd3b8fb6d174c3ff2c5361b16ef205303a61acbbcb2234d18fd9e62c0e6781"}
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.432078 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.452010 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.451987642 podStartE2EDuration="2.451987642s" podCreationTimestamp="2025-11-22 11:00:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:36.450872805 +0000 UTC m=+1368.918710204" watchObservedRunningTime="2025-11-22 11:00:36.451987642 +0000 UTC m=+1368.919825041"
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.460789 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ae3f585-65c4-4e6b-82e3-b113549fc229" path="/var/lib/kubelet/pods/0ae3f585-65c4-4e6b-82e3-b113549fc229/volumes"
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.462117 4938 scope.go:117] "RemoveContainer" containerID="1af0a7eb871f4535ff940431f1525a57552b15a41974fbd9c2712eabfe4efa91"
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.476373 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.484532 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.502738 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.508313 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.510358 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.516270 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.570266 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltsgl\" (UniqueName: \"kubernetes.io/projected/cc483d29-e2db-40b0-ba44-d20bf2783d1d-kube-api-access-ltsgl\") pod \"nova-api-0\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.570327 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc483d29-e2db-40b0-ba44-d20bf2783d1d-config-data\") pod \"nova-api-0\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.570363 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc483d29-e2db-40b0-ba44-d20bf2783d1d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.570460 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc483d29-e2db-40b0-ba44-d20bf2783d1d-logs\") pod \"nova-api-0\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.583946 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:36 crc kubenswrapper[4938]: W1122 11:00:36.586164 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod704acafb_fb16_45ae_b763_fabf037c4a90.slice/crio-bcfcc0a1b9aec2135cdf41ac09a9b44343a6fb049d092e4b14be422666aa423e WatchSource:0}: Error finding container bcfcc0a1b9aec2135cdf41ac09a9b44343a6fb049d092e4b14be422666aa423e: Status 404 returned error can't find the container with id bcfcc0a1b9aec2135cdf41ac09a9b44343a6fb049d092e4b14be422666aa423e Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.672461 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltsgl\" (UniqueName: \"kubernetes.io/projected/cc483d29-e2db-40b0-ba44-d20bf2783d1d-kube-api-access-ltsgl\") pod \"nova-api-0\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.672519 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc483d29-e2db-40b0-ba44-d20bf2783d1d-config-data\") pod \"nova-api-0\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.672563 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc483d29-e2db-40b0-ba44-d20bf2783d1d-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.672627 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc483d29-e2db-40b0-ba44-d20bf2783d1d-logs\") pod \"nova-api-0\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.673170 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc483d29-e2db-40b0-ba44-d20bf2783d1d-logs\") pod \"nova-api-0\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.676761 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc483d29-e2db-40b0-ba44-d20bf2783d1d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.676847 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc483d29-e2db-40b0-ba44-d20bf2783d1d-config-data\") pod \"nova-api-0\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.687926 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltsgl\" (UniqueName: \"kubernetes.io/projected/cc483d29-e2db-40b0-ba44-d20bf2783d1d-kube-api-access-ltsgl\") pod \"nova-api-0\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " pod="openstack/nova-api-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.742844 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.742977 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 11:00:36 crc kubenswrapper[4938]: I1122 11:00:36.834226 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:00:37 crc kubenswrapper[4938]: I1122 11:00:37.295016 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:00:37 crc kubenswrapper[4938]: I1122 11:00:37.449800 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"704acafb-fb16-45ae-b763-fabf037c4a90","Type":"ContainerStarted","Data":"fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73"} Nov 22 11:00:37 crc kubenswrapper[4938]: I1122 11:00:37.450813 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"704acafb-fb16-45ae-b763-fabf037c4a90","Type":"ContainerStarted","Data":"bcfcc0a1b9aec2135cdf41ac09a9b44343a6fb049d092e4b14be422666aa423e"} Nov 22 11:00:37 crc kubenswrapper[4938]: I1122 11:00:37.454356 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc483d29-e2db-40b0-ba44-d20bf2783d1d","Type":"ContainerStarted","Data":"f9feb6e4f801011d9c9c35adcff78f577a12a8df1f9b7661886c6572373b51cd"} Nov 22 11:00:37 crc kubenswrapper[4938]: I1122 11:00:37.483099 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.483077959 podStartE2EDuration="2.483077959s" podCreationTimestamp="2025-11-22 11:00:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:37.474714912 +0000 UTC m=+1369.942552311" watchObservedRunningTime="2025-11-22 11:00:37.483077959 +0000 UTC m=+1369.950915358" Nov 22 11:00:38 crc kubenswrapper[4938]: I1122 11:00:38.460859 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84bd3edb-e3da-4697-a1b9-d058b12a5476" path="/var/lib/kubelet/pods/84bd3edb-e3da-4697-a1b9-d058b12a5476/volumes" Nov 22 11:00:38 crc kubenswrapper[4938]: I1122 11:00:38.473047 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc483d29-e2db-40b0-ba44-d20bf2783d1d","Type":"ContainerStarted","Data":"cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031"} Nov 22 11:00:38 crc kubenswrapper[4938]: I1122 11:00:38.473450 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc483d29-e2db-40b0-ba44-d20bf2783d1d","Type":"ContainerStarted","Data":"4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5"} Nov 22 11:00:38 crc kubenswrapper[4938]: I1122 11:00:38.511782 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.511758116 podStartE2EDuration="2.511758116s" podCreationTimestamp="2025-11-22 11:00:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:38.503808398 +0000 UTC m=+1370.971645797" watchObservedRunningTime="2025-11-22 11:00:38.511758116 +0000 UTC m=+1370.979595515" Nov 22 11:00:41 crc kubenswrapper[4938]: I1122 11:00:41.072179 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 22 11:00:41 crc kubenswrapper[4938]: I1122 11:00:41.300817 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" start-of-body= Nov 22 11:00:41 crc kubenswrapper[4938]: I1122 11:00:41.300875 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:00:41 crc kubenswrapper[4938]: I1122 11:00:41.301006 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 11:00:41 crc kubenswrapper[4938]: I1122 11:00:41.301695 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f32603d3cd38d3d94b04f506650486f2678a5b58ca5be3b20ab1308b521f5361"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:00:41 crc kubenswrapper[4938]: I1122 11:00:41.301752 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://f32603d3cd38d3d94b04f506650486f2678a5b58ca5be3b20ab1308b521f5361" gracePeriod=600 Nov 22 11:00:41 crc kubenswrapper[4938]: I1122 11:00:41.503714 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="f32603d3cd38d3d94b04f506650486f2678a5b58ca5be3b20ab1308b521f5361" exitCode=0 Nov 22 11:00:41 crc kubenswrapper[4938]: I1122 11:00:41.503762 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"f32603d3cd38d3d94b04f506650486f2678a5b58ca5be3b20ab1308b521f5361"} Nov 22 11:00:41 crc kubenswrapper[4938]: I1122 11:00:41.503792 4938 scope.go:117] "RemoveContainer" containerID="83d06198e1a05493993f82688b4c5e788920b5b335d6cb139a79e3c5688ff404" Nov 22 11:00:41 crc kubenswrapper[4938]: I1122 11:00:41.742585 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 11:00:41 crc kubenswrapper[4938]: I1122 11:00:41.744144 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 11:00:42 crc kubenswrapper[4938]: I1122 11:00:42.513905 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c"} Nov 22 11:00:42 crc kubenswrapper[4938]: I1122 11:00:42.755046 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 11:00:42 crc kubenswrapper[4938]: I1122 11:00:42.755292 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" containerName="nova-metadata-log" probeResult="failure" output="Get 
\"https://10.217.0.193:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 11:00:44 crc kubenswrapper[4938]: I1122 11:00:44.797180 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 22 11:00:46 crc kubenswrapper[4938]: I1122 11:00:46.072065 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 22 11:00:46 crc kubenswrapper[4938]: I1122 11:00:46.106593 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 22 11:00:46 crc kubenswrapper[4938]: I1122 11:00:46.577382 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 22 11:00:46 crc kubenswrapper[4938]: I1122 11:00:46.835517 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 11:00:46 crc kubenswrapper[4938]: I1122 11:00:46.835581 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 11:00:47 crc kubenswrapper[4938]: I1122 11:00:47.918169 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.196:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 11:00:47 crc kubenswrapper[4938]: I1122 11:00:47.918234 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.196:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 11:00:51 crc kubenswrapper[4938]: I1122 11:00:51.747662 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 11:00:51 crc kubenswrapper[4938]: I1122 11:00:51.749049 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 11:00:51 crc kubenswrapper[4938]: I1122 11:00:51.754119 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 11:00:51 crc kubenswrapper[4938]: I1122 11:00:51.754326 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.620285 4938 generic.go:334] "Generic (PLEG): container finished" podID="45d22256-b34b-4614-8f08-97d6ff65511b" containerID="8aee91714eb7b8683df510e891af0cc2ce4e183b33f8d080061f5cc34a8ec99c" exitCode=137 Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.620378 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"45d22256-b34b-4614-8f08-97d6ff65511b","Type":"ContainerDied","Data":"8aee91714eb7b8683df510e891af0cc2ce4e183b33f8d080061f5cc34a8ec99c"} Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.620767 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"45d22256-b34b-4614-8f08-97d6ff65511b","Type":"ContainerDied","Data":"4914305487d25b637c774a1fed62ea96c44f7cd81936448cd0472b3b5cfe61e0"} Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.620805 4938 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="4914305487d25b637c774a1fed62ea96c44f7cd81936448cd0472b3b5cfe61e0" Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.641665 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.824455 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45d22256-b34b-4614-8f08-97d6ff65511b-config-data\") pod \"45d22256-b34b-4614-8f08-97d6ff65511b\" (UID: \"45d22256-b34b-4614-8f08-97d6ff65511b\") " Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.824623 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45d22256-b34b-4614-8f08-97d6ff65511b-combined-ca-bundle\") pod \"45d22256-b34b-4614-8f08-97d6ff65511b\" (UID: \"45d22256-b34b-4614-8f08-97d6ff65511b\") " Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.824694 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5b62b\" (UniqueName: \"kubernetes.io/projected/45d22256-b34b-4614-8f08-97d6ff65511b-kube-api-access-5b62b\") pod \"45d22256-b34b-4614-8f08-97d6ff65511b\" (UID: \"45d22256-b34b-4614-8f08-97d6ff65511b\") " Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.832304 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45d22256-b34b-4614-8f08-97d6ff65511b-kube-api-access-5b62b" (OuterVolumeSpecName: "kube-api-access-5b62b") pod "45d22256-b34b-4614-8f08-97d6ff65511b" (UID: "45d22256-b34b-4614-8f08-97d6ff65511b"). InnerVolumeSpecName "kube-api-access-5b62b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.858659 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45d22256-b34b-4614-8f08-97d6ff65511b-config-data" (OuterVolumeSpecName: "config-data") pod "45d22256-b34b-4614-8f08-97d6ff65511b" (UID: "45d22256-b34b-4614-8f08-97d6ff65511b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.859877 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45d22256-b34b-4614-8f08-97d6ff65511b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "45d22256-b34b-4614-8f08-97d6ff65511b" (UID: "45d22256-b34b-4614-8f08-97d6ff65511b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.927059 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45d22256-b34b-4614-8f08-97d6ff65511b-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.927105 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45d22256-b34b-4614-8f08-97d6ff65511b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:54 crc kubenswrapper[4938]: I1122 11:00:54.927119 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5b62b\" (UniqueName: \"kubernetes.io/projected/45d22256-b34b-4614-8f08-97d6ff65511b-kube-api-access-5b62b\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.631670 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.664577 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.698496 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.709994 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 11:00:55 crc kubenswrapper[4938]: E1122 11:00:55.710853 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d22256-b34b-4614-8f08-97d6ff65511b" containerName="nova-cell1-novncproxy-novncproxy" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.710870 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d22256-b34b-4614-8f08-97d6ff65511b" containerName="nova-cell1-novncproxy-novncproxy" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.711264 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="45d22256-b34b-4614-8f08-97d6ff65511b" containerName="nova-cell1-novncproxy-novncproxy" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.712176 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.715897 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.716178 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.716243 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.720039 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.842941 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqjrq\" (UniqueName: \"kubernetes.io/projected/d4c78748-ba52-4ebe-b136-07c4f0d939df-kube-api-access-cqjrq\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.842997 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4c78748-ba52-4ebe-b136-07c4f0d939df-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.843023 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4c78748-ba52-4ebe-b136-07c4f0d939df-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.843117 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4c78748-ba52-4ebe-b136-07c4f0d939df-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.843186 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4c78748-ba52-4ebe-b136-07c4f0d939df-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.945428 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqjrq\" (UniqueName: \"kubernetes.io/projected/d4c78748-ba52-4ebe-b136-07c4f0d939df-kube-api-access-cqjrq\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.945485 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4c78748-ba52-4ebe-b136-07c4f0d939df-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.945506 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4c78748-ba52-4ebe-b136-07c4f0d939df-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.945561 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4c78748-ba52-4ebe-b136-07c4f0d939df-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.945613 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4c78748-ba52-4ebe-b136-07c4f0d939df-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.951665 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4c78748-ba52-4ebe-b136-07c4f0d939df-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.953503 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4c78748-ba52-4ebe-b136-07c4f0d939df-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.954568 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4c78748-ba52-4ebe-b136-07c4f0d939df-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.955370 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4c78748-ba52-4ebe-b136-07c4f0d939df-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:55 crc kubenswrapper[4938]: I1122 11:00:55.964197 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqjrq\" (UniqueName: \"kubernetes.io/projected/d4c78748-ba52-4ebe-b136-07c4f0d939df-kube-api-access-cqjrq\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4c78748-ba52-4ebe-b136-07c4f0d939df\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:56 crc kubenswrapper[4938]: I1122 11:00:56.058776 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:00:56 crc kubenswrapper[4938]: I1122 11:00:56.457877 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45d22256-b34b-4614-8f08-97d6ff65511b" path="/var/lib/kubelet/pods/45d22256-b34b-4614-8f08-97d6ff65511b/volumes" Nov 22 11:00:56 crc kubenswrapper[4938]: I1122 11:00:56.495065 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 11:00:56 crc kubenswrapper[4938]: I1122 11:00:56.640561 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d4c78748-ba52-4ebe-b136-07c4f0d939df","Type":"ContainerStarted","Data":"894ef5e9c5d476c3c6eaf0d5d6a692803729932ef3e94e32c2d6a7c3bf865b59"} Nov 22 11:00:56 crc kubenswrapper[4938]: I1122 11:00:56.841498 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 11:00:56 crc kubenswrapper[4938]: I1122 11:00:56.842150 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 11:00:56 crc kubenswrapper[4938]: I1122 11:00:56.842734 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 11:00:56 crc kubenswrapper[4938]: I1122 11:00:56.853059 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.651033 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d4c78748-ba52-4ebe-b136-07c4f0d939df","Type":"ContainerStarted","Data":"abe069ea8697b3f9c0d939c1d870e59520136e7dc44914f2cbeb4d97da5f84c5"} Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.651710 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.654761 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.676642 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.676616856 podStartE2EDuration="2.676616856s" podCreationTimestamp="2025-11-22 11:00:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:57.667157062 +0000 UTC m=+1390.134994471" watchObservedRunningTime="2025-11-22 11:00:57.676616856 +0000 UTC m=+1390.144454255" Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.872190 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-8cftt"] Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.874211 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.886386 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-8cftt"] Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.987948 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.988076 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.988182 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.988220 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftqft\" (UniqueName: \"kubernetes.io/projected/58b89450-2eae-4604-b2e3-eb29ab66f574-kube-api-access-ftqft\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.988250 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-config\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:57 crc kubenswrapper[4938]: I1122 11:00:57.988277 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.089953 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.090025 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftqft\" (UniqueName: \"kubernetes.io/projected/58b89450-2eae-4604-b2e3-eb29ab66f574-kube-api-access-ftqft\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.090062 4938 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-config\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.090082 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.090137 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.090187 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.090954 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.090965 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.091669 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-config\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.091837 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.091939 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.120707 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftqft\" (UniqueName: 
\"kubernetes.io/projected/58b89450-2eae-4604-b2e3-eb29ab66f574-kube-api-access-ftqft\") pod \"dnsmasq-dns-cd5cbd7b9-8cftt\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.205628 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:00:58 crc kubenswrapper[4938]: I1122 11:00:58.694307 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-8cftt"] Nov 22 11:00:59 crc kubenswrapper[4938]: I1122 11:00:59.669413 4938 generic.go:334] "Generic (PLEG): container finished" podID="58b89450-2eae-4604-b2e3-eb29ab66f574" containerID="364b645ff49420722d8b62540356a6e0de85baa13955237a64db156f2b912ef2" exitCode=0 Nov 22 11:00:59 crc kubenswrapper[4938]: I1122 11:00:59.669452 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" event={"ID":"58b89450-2eae-4604-b2e3-eb29ab66f574","Type":"ContainerDied","Data":"364b645ff49420722d8b62540356a6e0de85baa13955237a64db156f2b912ef2"} Nov 22 11:00:59 crc kubenswrapper[4938]: I1122 11:00:59.669800 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" event={"ID":"58b89450-2eae-4604-b2e3-eb29ab66f574","Type":"ContainerStarted","Data":"34e46163c88abb45b1aead8df895bed451ea34938e1acbf9f664bf44d766c0ac"} Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.129332 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29396821-fw2vs"] Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.131065 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.141862 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29396821-fw2vs"] Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.232871 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-config-data\") pod \"keystone-cron-29396821-fw2vs\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.233577 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-fernet-keys\") pod \"keystone-cron-29396821-fw2vs\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.233688 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mm9sf\" (UniqueName: \"kubernetes.io/projected/ad31137e-3dac-4a06-9b17-e54340147400-kube-api-access-mm9sf\") pod \"keystone-cron-29396821-fw2vs\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.233814 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-combined-ca-bundle\") pod \"keystone-cron-29396821-fw2vs\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " 
pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.259604 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.260130 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="ceilometer-central-agent" containerID="cri-o://721a7ca8713e0eac25790794b11fb0ba077d45efa26ddec6458d6794eb239df9" gracePeriod=30 Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.260269 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="ceilometer-notification-agent" containerID="cri-o://e9ca44502bada600f84c79512fe7e3e9a584a5309ce226beaa15211e8a6b4373" gracePeriod=30 Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.260250 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="sg-core" containerID="cri-o://036e5c6978df991b9af76e857b5a0b281796b76f4d8ad2e761a4e856411fa59b" gracePeriod=30 Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.260554 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="proxy-httpd" containerID="cri-o://a28f9a7d43054481cd014004ec6725abbfc8b745e6826dd9f786c79bdfd63bca" gracePeriod=30 Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.336226 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-fernet-keys\") pod \"keystone-cron-29396821-fw2vs\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.336493 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mm9sf\" (UniqueName: \"kubernetes.io/projected/ad31137e-3dac-4a06-9b17-e54340147400-kube-api-access-mm9sf\") pod \"keystone-cron-29396821-fw2vs\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.336575 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-combined-ca-bundle\") pod \"keystone-cron-29396821-fw2vs\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.336708 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-config-data\") pod \"keystone-cron-29396821-fw2vs\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.341722 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-combined-ca-bundle\") pod \"keystone-cron-29396821-fw2vs\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc 
kubenswrapper[4938]: I1122 11:01:00.341782 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-fernet-keys\") pod \"keystone-cron-29396821-fw2vs\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.341960 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-config-data\") pod \"keystone-cron-29396821-fw2vs\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.360945 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mm9sf\" (UniqueName: \"kubernetes.io/projected/ad31137e-3dac-4a06-9b17-e54340147400-kube-api-access-mm9sf\") pod \"keystone-cron-29396821-fw2vs\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.451036 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.527190 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.687343 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" event={"ID":"58b89450-2eae-4604-b2e3-eb29ab66f574","Type":"ContainerStarted","Data":"a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d"} Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.687680 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.692755 4938 generic.go:334] "Generic (PLEG): container finished" podID="797c3455-d508-4258-942c-1fe85abd0bcb" containerID="a28f9a7d43054481cd014004ec6725abbfc8b745e6826dd9f786c79bdfd63bca" exitCode=0 Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.692781 4938 generic.go:334] "Generic (PLEG): container finished" podID="797c3455-d508-4258-942c-1fe85abd0bcb" containerID="036e5c6978df991b9af76e857b5a0b281796b76f4d8ad2e761a4e856411fa59b" exitCode=2 Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.692976 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" containerName="nova-api-log" containerID="cri-o://4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5" gracePeriod=30 Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.693999 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"797c3455-d508-4258-942c-1fe85abd0bcb","Type":"ContainerDied","Data":"a28f9a7d43054481cd014004ec6725abbfc8b745e6826dd9f786c79bdfd63bca"} Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.694030 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"797c3455-d508-4258-942c-1fe85abd0bcb","Type":"ContainerDied","Data":"036e5c6978df991b9af76e857b5a0b281796b76f4d8ad2e761a4e856411fa59b"} Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.694093 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" 
podUID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" containerName="nova-api-api" containerID="cri-o://cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031" gracePeriod=30 Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.715479 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" podStartSLOduration=3.715462037 podStartE2EDuration="3.715462037s" podCreationTimestamp="2025-11-22 11:00:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:01:00.71236831 +0000 UTC m=+1393.180205709" watchObservedRunningTime="2025-11-22 11:01:00.715462037 +0000 UTC m=+1393.183299436" Nov 22 11:01:00 crc kubenswrapper[4938]: I1122 11:01:00.898842 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29396821-fw2vs"] Nov 22 11:01:00 crc kubenswrapper[4938]: W1122 11:01:00.901222 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad31137e_3dac_4a06_9b17_e54340147400.slice/crio-05564ab4071d454d1f99588923f1ebc3925e496d10f90c029da571d65001fd03 WatchSource:0}: Error finding container 05564ab4071d454d1f99588923f1ebc3925e496d10f90c029da571d65001fd03: Status 404 returned error can't find the container with id 05564ab4071d454d1f99588923f1ebc3925e496d10f90c029da571d65001fd03 Nov 22 11:01:01 crc kubenswrapper[4938]: I1122 11:01:01.059659 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:01:01 crc kubenswrapper[4938]: I1122 11:01:01.703544 4938 generic.go:334] "Generic (PLEG): container finished" podID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" containerID="4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5" exitCode=143 Nov 22 11:01:01 crc kubenswrapper[4938]: I1122 11:01:01.703957 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc483d29-e2db-40b0-ba44-d20bf2783d1d","Type":"ContainerDied","Data":"4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5"} Nov 22 11:01:01 crc kubenswrapper[4938]: I1122 11:01:01.707171 4938 generic.go:334] "Generic (PLEG): container finished" podID="797c3455-d508-4258-942c-1fe85abd0bcb" containerID="721a7ca8713e0eac25790794b11fb0ba077d45efa26ddec6458d6794eb239df9" exitCode=0 Nov 22 11:01:01 crc kubenswrapper[4938]: I1122 11:01:01.707261 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"797c3455-d508-4258-942c-1fe85abd0bcb","Type":"ContainerDied","Data":"721a7ca8713e0eac25790794b11fb0ba077d45efa26ddec6458d6794eb239df9"} Nov 22 11:01:01 crc kubenswrapper[4938]: I1122 11:01:01.709640 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396821-fw2vs" event={"ID":"ad31137e-3dac-4a06-9b17-e54340147400","Type":"ContainerStarted","Data":"6a93d1d8187f785744702fb133b161d84bd34b9dd31ef925f3d36d048f6fc25b"} Nov 22 11:01:01 crc kubenswrapper[4938]: I1122 11:01:01.709734 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396821-fw2vs" event={"ID":"ad31137e-3dac-4a06-9b17-e54340147400","Type":"ContainerStarted","Data":"05564ab4071d454d1f99588923f1ebc3925e496d10f90c029da571d65001fd03"} Nov 22 11:01:01 crc kubenswrapper[4938]: I1122 11:01:01.732984 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29396821-fw2vs" 
podStartSLOduration=1.732962516 podStartE2EDuration="1.732962516s" podCreationTimestamp="2025-11-22 11:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:01:01.724209759 +0000 UTC m=+1394.192047158" watchObservedRunningTime="2025-11-22 11:01:01.732962516 +0000 UTC m=+1394.200799915" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.724361 4938 generic.go:334] "Generic (PLEG): container finished" podID="797c3455-d508-4258-942c-1fe85abd0bcb" containerID="e9ca44502bada600f84c79512fe7e3e9a584a5309ce226beaa15211e8a6b4373" exitCode=0 Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.724457 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"797c3455-d508-4258-942c-1fe85abd0bcb","Type":"ContainerDied","Data":"e9ca44502bada600f84c79512fe7e3e9a584a5309ce226beaa15211e8a6b4373"} Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.724703 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"797c3455-d508-4258-942c-1fe85abd0bcb","Type":"ContainerDied","Data":"877d9f70e3c695db61df657aadf1022c235c87853e5b86b3f183d5227c2579a6"} Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.724717 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="877d9f70e3c695db61df657aadf1022c235c87853e5b86b3f183d5227c2579a6" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.778769 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.891046 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/797c3455-d508-4258-942c-1fe85abd0bcb-log-httpd\") pod \"797c3455-d508-4258-942c-1fe85abd0bcb\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.891152 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-scripts\") pod \"797c3455-d508-4258-942c-1fe85abd0bcb\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.891390 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/797c3455-d508-4258-942c-1fe85abd0bcb-run-httpd\") pod \"797c3455-d508-4258-942c-1fe85abd0bcb\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.891525 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrwsk\" (UniqueName: \"kubernetes.io/projected/797c3455-d508-4258-942c-1fe85abd0bcb-kube-api-access-jrwsk\") pod \"797c3455-d508-4258-942c-1fe85abd0bcb\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.891583 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-config-data\") pod \"797c3455-d508-4258-942c-1fe85abd0bcb\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.892061 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/797c3455-d508-4258-942c-1fe85abd0bcb-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "797c3455-d508-4258-942c-1fe85abd0bcb" (UID: "797c3455-d508-4258-942c-1fe85abd0bcb"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.892139 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/797c3455-d508-4258-942c-1fe85abd0bcb-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "797c3455-d508-4258-942c-1fe85abd0bcb" (UID: "797c3455-d508-4258-942c-1fe85abd0bcb"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.892644 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-sg-core-conf-yaml\") pod \"797c3455-d508-4258-942c-1fe85abd0bcb\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.892708 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-ceilometer-tls-certs\") pod \"797c3455-d508-4258-942c-1fe85abd0bcb\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.892742 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-combined-ca-bundle\") pod \"797c3455-d508-4258-942c-1fe85abd0bcb\" (UID: \"797c3455-d508-4258-942c-1fe85abd0bcb\") " Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.893517 4938 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/797c3455-d508-4258-942c-1fe85abd0bcb-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.893546 4938 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/797c3455-d508-4258-942c-1fe85abd0bcb-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.897722 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/797c3455-d508-4258-942c-1fe85abd0bcb-kube-api-access-jrwsk" (OuterVolumeSpecName: "kube-api-access-jrwsk") pod "797c3455-d508-4258-942c-1fe85abd0bcb" (UID: "797c3455-d508-4258-942c-1fe85abd0bcb"). InnerVolumeSpecName "kube-api-access-jrwsk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.898230 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-scripts" (OuterVolumeSpecName: "scripts") pod "797c3455-d508-4258-942c-1fe85abd0bcb" (UID: "797c3455-d508-4258-942c-1fe85abd0bcb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.923150 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "797c3455-d508-4258-942c-1fe85abd0bcb" (UID: "797c3455-d508-4258-942c-1fe85abd0bcb"). 
InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.959251 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "797c3455-d508-4258-942c-1fe85abd0bcb" (UID: "797c3455-d508-4258-942c-1fe85abd0bcb"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.985314 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "797c3455-d508-4258-942c-1fe85abd0bcb" (UID: "797c3455-d508-4258-942c-1fe85abd0bcb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.995090 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrwsk\" (UniqueName: \"kubernetes.io/projected/797c3455-d508-4258-942c-1fe85abd0bcb-kube-api-access-jrwsk\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.995122 4938 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.995137 4938 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.995150 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.995161 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:02 crc kubenswrapper[4938]: I1122 11:01:02.995207 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-config-data" (OuterVolumeSpecName: "config-data") pod "797c3455-d508-4258-942c-1fe85abd0bcb" (UID: "797c3455-d508-4258-942c-1fe85abd0bcb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.096842 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/797c3455-d508-4258-942c-1fe85abd0bcb-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.734837 4938 generic.go:334] "Generic (PLEG): container finished" podID="ad31137e-3dac-4a06-9b17-e54340147400" containerID="6a93d1d8187f785744702fb133b161d84bd34b9dd31ef925f3d36d048f6fc25b" exitCode=0 Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.734957 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.734947 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396821-fw2vs" event={"ID":"ad31137e-3dac-4a06-9b17-e54340147400","Type":"ContainerDied","Data":"6a93d1d8187f785744702fb133b161d84bd34b9dd31ef925f3d36d048f6fc25b"} Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.773574 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.780535 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.801342 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 11:01:03 crc kubenswrapper[4938]: E1122 11:01:03.801790 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="sg-core" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.801811 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="sg-core" Nov 22 11:01:03 crc kubenswrapper[4938]: E1122 11:01:03.801833 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="ceilometer-notification-agent" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.801841 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="ceilometer-notification-agent" Nov 22 11:01:03 crc kubenswrapper[4938]: E1122 11:01:03.801864 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="ceilometer-central-agent" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.801871 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="ceilometer-central-agent" Nov 22 11:01:03 crc kubenswrapper[4938]: E1122 11:01:03.801888 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="proxy-httpd" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.801898 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="proxy-httpd" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.802568 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="ceilometer-central-agent" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.802598 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="ceilometer-notification-agent" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.802616 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="proxy-httpd" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.802638 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" containerName="sg-core" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.804818 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.807420 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.807723 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.815333 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.821133 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.910770 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.911054 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-scripts\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.911167 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30d989f4-c834-49a2-aeaf-6478a2318852-log-httpd\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.911268 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f65lz\" (UniqueName: \"kubernetes.io/projected/30d989f4-c834-49a2-aeaf-6478a2318852-kube-api-access-f65lz\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.911440 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30d989f4-c834-49a2-aeaf-6478a2318852-run-httpd\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.911975 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.912011 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:03 crc kubenswrapper[4938]: I1122 11:01:03.912057 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-config-data\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.014038 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.014116 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-scripts\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.014157 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30d989f4-c834-49a2-aeaf-6478a2318852-log-httpd\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.014195 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f65lz\" (UniqueName: \"kubernetes.io/projected/30d989f4-c834-49a2-aeaf-6478a2318852-kube-api-access-f65lz\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.014236 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30d989f4-c834-49a2-aeaf-6478a2318852-run-httpd\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.014281 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.014298 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.014323 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-config-data\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.014984 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/30d989f4-c834-49a2-aeaf-6478a2318852-run-httpd\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.015330 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/30d989f4-c834-49a2-aeaf-6478a2318852-log-httpd\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.018770 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.028351 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.028432 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.028628 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-scripts\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.030184 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f65lz\" (UniqueName: \"kubernetes.io/projected/30d989f4-c834-49a2-aeaf-6478a2318852-kube-api-access-f65lz\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.030339 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30d989f4-c834-49a2-aeaf-6478a2318852-config-data\") pod \"ceilometer-0\" (UID: \"30d989f4-c834-49a2-aeaf-6478a2318852\") " pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.126559 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.375013 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.461683 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="797c3455-d508-4258-942c-1fe85abd0bcb" path="/var/lib/kubelet/pods/797c3455-d508-4258-942c-1fe85abd0bcb/volumes" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.521874 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc483d29-e2db-40b0-ba44-d20bf2783d1d-logs\") pod \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.521981 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc483d29-e2db-40b0-ba44-d20bf2783d1d-config-data\") pod \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.522172 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc483d29-e2db-40b0-ba44-d20bf2783d1d-combined-ca-bundle\") pod \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.522256 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ltsgl\" (UniqueName: \"kubernetes.io/projected/cc483d29-e2db-40b0-ba44-d20bf2783d1d-kube-api-access-ltsgl\") pod \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\" (UID: \"cc483d29-e2db-40b0-ba44-d20bf2783d1d\") " Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.524156 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc483d29-e2db-40b0-ba44-d20bf2783d1d-logs" (OuterVolumeSpecName: "logs") pod "cc483d29-e2db-40b0-ba44-d20bf2783d1d" (UID: "cc483d29-e2db-40b0-ba44-d20bf2783d1d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.528026 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc483d29-e2db-40b0-ba44-d20bf2783d1d-kube-api-access-ltsgl" (OuterVolumeSpecName: "kube-api-access-ltsgl") pod "cc483d29-e2db-40b0-ba44-d20bf2783d1d" (UID: "cc483d29-e2db-40b0-ba44-d20bf2783d1d"). InnerVolumeSpecName "kube-api-access-ltsgl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.560050 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc483d29-e2db-40b0-ba44-d20bf2783d1d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc483d29-e2db-40b0-ba44-d20bf2783d1d" (UID: "cc483d29-e2db-40b0-ba44-d20bf2783d1d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.563312 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc483d29-e2db-40b0-ba44-d20bf2783d1d-config-data" (OuterVolumeSpecName: "config-data") pod "cc483d29-e2db-40b0-ba44-d20bf2783d1d" (UID: "cc483d29-e2db-40b0-ba44-d20bf2783d1d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.627075 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc483d29-e2db-40b0-ba44-d20bf2783d1d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.627155 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ltsgl\" (UniqueName: \"kubernetes.io/projected/cc483d29-e2db-40b0-ba44-d20bf2783d1d-kube-api-access-ltsgl\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.627170 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc483d29-e2db-40b0-ba44-d20bf2783d1d-logs\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.627182 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc483d29-e2db-40b0-ba44-d20bf2783d1d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.660237 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 11:01:04 crc kubenswrapper[4938]: W1122 11:01:04.671732 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30d989f4_c834_49a2_aeaf_6478a2318852.slice/crio-35cb710453a124dba02e907286ad041070a402a92c2acc246fe84b4b43b789a9 WatchSource:0}: Error finding container 35cb710453a124dba02e907286ad041070a402a92c2acc246fe84b4b43b789a9: Status 404 returned error can't find the container with id 35cb710453a124dba02e907286ad041070a402a92c2acc246fe84b4b43b789a9 Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.675384 4938 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.750994 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30d989f4-c834-49a2-aeaf-6478a2318852","Type":"ContainerStarted","Data":"35cb710453a124dba02e907286ad041070a402a92c2acc246fe84b4b43b789a9"} Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.754119 4938 generic.go:334] "Generic (PLEG): container finished" podID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" containerID="cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031" exitCode=0 Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.754302 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.760965 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc483d29-e2db-40b0-ba44-d20bf2783d1d","Type":"ContainerDied","Data":"cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031"} Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.761050 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc483d29-e2db-40b0-ba44-d20bf2783d1d","Type":"ContainerDied","Data":"f9feb6e4f801011d9c9c35adcff78f577a12a8df1f9b7661886c6572373b51cd"} Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.761081 4938 scope.go:117] "RemoveContainer" containerID="cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.814965 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.856959 4938 scope.go:117] "RemoveContainer" containerID="4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.863315 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.880505 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 11:01:04 crc kubenswrapper[4938]: E1122 11:01:04.881389 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" containerName="nova-api-log" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.881412 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" containerName="nova-api-log" Nov 22 11:01:04 crc kubenswrapper[4938]: E1122 11:01:04.881457 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" containerName="nova-api-api" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.881465 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" containerName="nova-api-api" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.881630 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" containerName="nova-api-api" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.881658 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" containerName="nova-api-log" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.884419 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.886806 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.886998 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.890803 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.897681 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.912058 4938 scope.go:117] "RemoveContainer" containerID="cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031" Nov 22 11:01:04 crc kubenswrapper[4938]: E1122 11:01:04.917071 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031\": container with ID starting with cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031 not found: ID does not exist" containerID="cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.917134 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031"} err="failed to get container status \"cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031\": rpc error: code = NotFound desc = could not find container \"cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031\": container with ID starting with cdc2b538b6f2f70132b7ae2cd1239e5246358344b9c729638a78b73af5ca8031 not found: ID does not exist" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.917171 4938 scope.go:117] "RemoveContainer" containerID="4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5" Nov 22 11:01:04 crc kubenswrapper[4938]: E1122 11:01:04.921996 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5\": container with ID starting with 4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5 not found: ID does not exist" containerID="4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.922151 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5"} err="failed to get container status \"4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5\": rpc error: code = NotFound desc = could not find container \"4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5\": container with ID starting with 4e5f23ba19b1ee9ee6c362640f4237f486ee8ad56adbc6d1d4bad4004456afb5 not found: ID does not exist" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.939171 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 
11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.939369 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-config-data\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.939487 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8008daf9-4eb6-4514-8510-5a6b3053758c-logs\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.939522 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-public-tls-certs\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.939558 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgqhx\" (UniqueName: \"kubernetes.io/projected/8008daf9-4eb6-4514-8510-5a6b3053758c-kube-api-access-rgqhx\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:04 crc kubenswrapper[4938]: I1122 11:01:04.939616 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.041176 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.041343 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.041418 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-config-data\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.041499 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8008daf9-4eb6-4514-8510-5a6b3053758c-logs\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.041540 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-public-tls-certs\") pod \"nova-api-0\" (UID: 
\"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.041563 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgqhx\" (UniqueName: \"kubernetes.io/projected/8008daf9-4eb6-4514-8510-5a6b3053758c-kube-api-access-rgqhx\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.042351 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8008daf9-4eb6-4514-8510-5a6b3053758c-logs\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.046966 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-public-tls-certs\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.047623 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.047704 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-config-data\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.051168 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.070513 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgqhx\" (UniqueName: \"kubernetes.io/projected/8008daf9-4eb6-4514-8510-5a6b3053758c-kube-api-access-rgqhx\") pod \"nova-api-0\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.164575 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.213294 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.347635 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-fernet-keys\") pod \"ad31137e-3dac-4a06-9b17-e54340147400\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.348092 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mm9sf\" (UniqueName: \"kubernetes.io/projected/ad31137e-3dac-4a06-9b17-e54340147400-kube-api-access-mm9sf\") pod \"ad31137e-3dac-4a06-9b17-e54340147400\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.348181 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-config-data\") pod \"ad31137e-3dac-4a06-9b17-e54340147400\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.348346 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-combined-ca-bundle\") pod \"ad31137e-3dac-4a06-9b17-e54340147400\" (UID: \"ad31137e-3dac-4a06-9b17-e54340147400\") " Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.357486 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad31137e-3dac-4a06-9b17-e54340147400-kube-api-access-mm9sf" (OuterVolumeSpecName: "kube-api-access-mm9sf") pod "ad31137e-3dac-4a06-9b17-e54340147400" (UID: "ad31137e-3dac-4a06-9b17-e54340147400"). InnerVolumeSpecName "kube-api-access-mm9sf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.360076 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ad31137e-3dac-4a06-9b17-e54340147400" (UID: "ad31137e-3dac-4a06-9b17-e54340147400"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.389637 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad31137e-3dac-4a06-9b17-e54340147400" (UID: "ad31137e-3dac-4a06-9b17-e54340147400"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.410191 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-config-data" (OuterVolumeSpecName: "config-data") pod "ad31137e-3dac-4a06-9b17-e54340147400" (UID: "ad31137e-3dac-4a06-9b17-e54340147400"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.451599 4938 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.451632 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mm9sf\" (UniqueName: \"kubernetes.io/projected/ad31137e-3dac-4a06-9b17-e54340147400-kube-api-access-mm9sf\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.451641 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.451651 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad31137e-3dac-4a06-9b17-e54340147400-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.679215 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.793055 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8008daf9-4eb6-4514-8510-5a6b3053758c","Type":"ContainerStarted","Data":"35c77513942d8d0fd1dc6d89223ec799ba97462f0b3bd6d610bed6ee263f9a72"} Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.796523 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396821-fw2vs" event={"ID":"ad31137e-3dac-4a06-9b17-e54340147400","Type":"ContainerDied","Data":"05564ab4071d454d1f99588923f1ebc3925e496d10f90c029da571d65001fd03"} Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.796594 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05564ab4071d454d1f99588923f1ebc3925e496d10f90c029da571d65001fd03" Nov 22 11:01:05 crc kubenswrapper[4938]: I1122 11:01:05.796700 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29396821-fw2vs" Nov 22 11:01:06 crc kubenswrapper[4938]: I1122 11:01:06.059279 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:01:06 crc kubenswrapper[4938]: I1122 11:01:06.092388 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:01:06 crc kubenswrapper[4938]: I1122 11:01:06.481468 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc483d29-e2db-40b0-ba44-d20bf2783d1d" path="/var/lib/kubelet/pods/cc483d29-e2db-40b0-ba44-d20bf2783d1d/volumes" Nov 22 11:01:06 crc kubenswrapper[4938]: I1122 11:01:06.813673 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8008daf9-4eb6-4514-8510-5a6b3053758c","Type":"ContainerStarted","Data":"545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587"} Nov 22 11:01:06 crc kubenswrapper[4938]: I1122 11:01:06.813742 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8008daf9-4eb6-4514-8510-5a6b3053758c","Type":"ContainerStarted","Data":"5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf"} Nov 22 11:01:06 crc kubenswrapper[4938]: I1122 11:01:06.824740 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30d989f4-c834-49a2-aeaf-6478a2318852","Type":"ContainerStarted","Data":"90b9dda1e2775cac736cd01ce2ea8b05d9ccb0229dd47bddcce4d05a2030c7ac"} Nov 22 11:01:06 crc kubenswrapper[4938]: I1122 11:01:06.845979 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 22 11:01:06 crc kubenswrapper[4938]: I1122 11:01:06.852457 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.852441427 podStartE2EDuration="2.852441427s" podCreationTimestamp="2025-11-22 11:01:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:01:06.83964912 +0000 UTC m=+1399.307486519" watchObservedRunningTime="2025-11-22 11:01:06.852441427 +0000 UTC m=+1399.320278816" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.073880 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-f4dlk"] Nov 22 11:01:07 crc kubenswrapper[4938]: E1122 11:01:07.074357 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad31137e-3dac-4a06-9b17-e54340147400" containerName="keystone-cron" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.074375 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad31137e-3dac-4a06-9b17-e54340147400" containerName="keystone-cron" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.074585 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad31137e-3dac-4a06-9b17-e54340147400" containerName="keystone-cron" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.075227 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.082284 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-f4dlk"] Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.082553 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.082725 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.199630 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lj5mb\" (UniqueName: \"kubernetes.io/projected/a6b7624c-96ce-45d5-bc85-4549bc1c0988-kube-api-access-lj5mb\") pod \"nova-cell1-cell-mapping-f4dlk\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.199838 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-scripts\") pod \"nova-cell1-cell-mapping-f4dlk\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.200050 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-config-data\") pod \"nova-cell1-cell-mapping-f4dlk\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.200194 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-f4dlk\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.301929 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-f4dlk\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.302059 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lj5mb\" (UniqueName: \"kubernetes.io/projected/a6b7624c-96ce-45d5-bc85-4549bc1c0988-kube-api-access-lj5mb\") pod \"nova-cell1-cell-mapping-f4dlk\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.302125 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-scripts\") pod \"nova-cell1-cell-mapping-f4dlk\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.302187 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-config-data\") pod \"nova-cell1-cell-mapping-f4dlk\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.307223 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-f4dlk\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.307398 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-config-data\") pod \"nova-cell1-cell-mapping-f4dlk\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.310694 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-scripts\") pod \"nova-cell1-cell-mapping-f4dlk\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.323366 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lj5mb\" (UniqueName: \"kubernetes.io/projected/a6b7624c-96ce-45d5-bc85-4549bc1c0988-kube-api-access-lj5mb\") pod \"nova-cell1-cell-mapping-f4dlk\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:07 crc kubenswrapper[4938]: I1122 11:01:07.404605 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.135833 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-f4dlk"] Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.208540 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.263385 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-hxnp6"] Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.263958 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6" podUID="413e9101-7406-46b5-b183-87c6b75b1ac8" containerName="dnsmasq-dns" containerID="cri-o://788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112" gracePeriod=10 Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.732702 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.734710 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-ovsdbserver-nb\") pod \"413e9101-7406-46b5-b183-87c6b75b1ac8\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.734749 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-ovsdbserver-sb\") pod \"413e9101-7406-46b5-b183-87c6b75b1ac8\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.734806 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjkvz\" (UniqueName: \"kubernetes.io/projected/413e9101-7406-46b5-b183-87c6b75b1ac8-kube-api-access-gjkvz\") pod \"413e9101-7406-46b5-b183-87c6b75b1ac8\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.734836 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-config\") pod \"413e9101-7406-46b5-b183-87c6b75b1ac8\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.734870 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-dns-swift-storage-0\") pod \"413e9101-7406-46b5-b183-87c6b75b1ac8\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.734971 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-dns-svc\") pod \"413e9101-7406-46b5-b183-87c6b75b1ac8\" (UID: \"413e9101-7406-46b5-b183-87c6b75b1ac8\") " Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.758520 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/413e9101-7406-46b5-b183-87c6b75b1ac8-kube-api-access-gjkvz" (OuterVolumeSpecName: "kube-api-access-gjkvz") pod "413e9101-7406-46b5-b183-87c6b75b1ac8" (UID: "413e9101-7406-46b5-b183-87c6b75b1ac8"). InnerVolumeSpecName "kube-api-access-gjkvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.799860 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "413e9101-7406-46b5-b183-87c6b75b1ac8" (UID: "413e9101-7406-46b5-b183-87c6b75b1ac8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.815386 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "413e9101-7406-46b5-b183-87c6b75b1ac8" (UID: "413e9101-7406-46b5-b183-87c6b75b1ac8"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.818700 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-config" (OuterVolumeSpecName: "config") pod "413e9101-7406-46b5-b183-87c6b75b1ac8" (UID: "413e9101-7406-46b5-b183-87c6b75b1ac8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.825353 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "413e9101-7406-46b5-b183-87c6b75b1ac8" (UID: "413e9101-7406-46b5-b183-87c6b75b1ac8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.838165 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.838200 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.838209 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjkvz\" (UniqueName: \"kubernetes.io/projected/413e9101-7406-46b5-b183-87c6b75b1ac8-kube-api-access-gjkvz\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.838221 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-config\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.838230 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.867314 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30d989f4-c834-49a2-aeaf-6478a2318852","Type":"ContainerStarted","Data":"fde54ba4e4b5737eca1ab1847f8a331bbbecfdc93a79f691ba8c51f270c77cc2"} Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.867369 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30d989f4-c834-49a2-aeaf-6478a2318852","Type":"ContainerStarted","Data":"dda0cbd80ce476e8e4a197b039bbd85d7bff74d0c2f2e542bb91be8735032763"} Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.869430 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-f4dlk" event={"ID":"a6b7624c-96ce-45d5-bc85-4549bc1c0988","Type":"ContainerStarted","Data":"5dca54e774d1c8690cd3878e26ee81de2824078f8e56e21bfc120572170c418c"} Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.869511 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-f4dlk" event={"ID":"a6b7624c-96ce-45d5-bc85-4549bc1c0988","Type":"ContainerStarted","Data":"428c55f463ae9ee85adb5964388f91849548c84de15adb1ddd79234e8b2632f9"} Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.872556 4938 generic.go:334] 
"Generic (PLEG): container finished" podID="413e9101-7406-46b5-b183-87c6b75b1ac8" containerID="788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112" exitCode=0 Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.872593 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6" event={"ID":"413e9101-7406-46b5-b183-87c6b75b1ac8","Type":"ContainerDied","Data":"788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112"} Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.872613 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6" event={"ID":"413e9101-7406-46b5-b183-87c6b75b1ac8","Type":"ContainerDied","Data":"fb331b046a876ed9a380749a7a2eb4472543fe9ea23dcf4ba1273915427d14d7"} Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.872635 4938 scope.go:117] "RemoveContainer" containerID="788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.872795 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-hxnp6" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.886027 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-f4dlk" podStartSLOduration=1.886004062 podStartE2EDuration="1.886004062s" podCreationTimestamp="2025-11-22 11:01:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:01:08.883957311 +0000 UTC m=+1401.351794710" watchObservedRunningTime="2025-11-22 11:01:08.886004062 +0000 UTC m=+1401.353841461" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.886412 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "413e9101-7406-46b5-b183-87c6b75b1ac8" (UID: "413e9101-7406-46b5-b183-87c6b75b1ac8"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.900215 4938 scope.go:117] "RemoveContainer" containerID="24a565116f55b7267b22cf137885bddc6e1bee8f5397bdb9d797e8a836b2038d" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.926087 4938 scope.go:117] "RemoveContainer" containerID="788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112" Nov 22 11:01:08 crc kubenswrapper[4938]: E1122 11:01:08.926572 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112\": container with ID starting with 788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112 not found: ID does not exist" containerID="788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.926624 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112"} err="failed to get container status \"788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112\": rpc error: code = NotFound desc = could not find container \"788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112\": container with ID starting with 788ada1be8497c48f73bdf0a682c9f8ae7622c31c87390e0c57e4aa73248e112 not found: ID does not exist" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.926655 4938 scope.go:117] "RemoveContainer" containerID="24a565116f55b7267b22cf137885bddc6e1bee8f5397bdb9d797e8a836b2038d" Nov 22 11:01:08 crc kubenswrapper[4938]: E1122 11:01:08.927061 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24a565116f55b7267b22cf137885bddc6e1bee8f5397bdb9d797e8a836b2038d\": container with ID starting with 24a565116f55b7267b22cf137885bddc6e1bee8f5397bdb9d797e8a836b2038d not found: ID does not exist" containerID="24a565116f55b7267b22cf137885bddc6e1bee8f5397bdb9d797e8a836b2038d" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.927206 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24a565116f55b7267b22cf137885bddc6e1bee8f5397bdb9d797e8a836b2038d"} err="failed to get container status \"24a565116f55b7267b22cf137885bddc6e1bee8f5397bdb9d797e8a836b2038d\": rpc error: code = NotFound desc = could not find container \"24a565116f55b7267b22cf137885bddc6e1bee8f5397bdb9d797e8a836b2038d\": container with ID starting with 24a565116f55b7267b22cf137885bddc6e1bee8f5397bdb9d797e8a836b2038d not found: ID does not exist" Nov 22 11:01:08 crc kubenswrapper[4938]: I1122 11:01:08.940171 4938 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/413e9101-7406-46b5-b183-87c6b75b1ac8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:09 crc kubenswrapper[4938]: I1122 11:01:09.213090 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-hxnp6"] Nov 22 11:01:09 crc kubenswrapper[4938]: I1122 11:01:09.226768 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-hxnp6"] Nov 22 11:01:10 crc kubenswrapper[4938]: I1122 11:01:10.459638 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="413e9101-7406-46b5-b183-87c6b75b1ac8" 
path="/var/lib/kubelet/pods/413e9101-7406-46b5-b183-87c6b75b1ac8/volumes" Nov 22 11:01:10 crc kubenswrapper[4938]: I1122 11:01:10.925060 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"30d989f4-c834-49a2-aeaf-6478a2318852","Type":"ContainerStarted","Data":"0d399a9b991f96925779e8e9115f6acf17bf95e44aef19307f8365950da16c4c"} Nov 22 11:01:10 crc kubenswrapper[4938]: I1122 11:01:10.925518 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 11:01:10 crc kubenswrapper[4938]: I1122 11:01:10.954383 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.882276111 podStartE2EDuration="7.954367618s" podCreationTimestamp="2025-11-22 11:01:03 +0000 UTC" firstStartedPulling="2025-11-22 11:01:04.675131251 +0000 UTC m=+1397.142968640" lastFinishedPulling="2025-11-22 11:01:09.747222748 +0000 UTC m=+1402.215060147" observedRunningTime="2025-11-22 11:01:10.951047085 +0000 UTC m=+1403.418884484" watchObservedRunningTime="2025-11-22 11:01:10.954367618 +0000 UTC m=+1403.422205017" Nov 22 11:01:13 crc kubenswrapper[4938]: I1122 11:01:13.953715 4938 generic.go:334] "Generic (PLEG): container finished" podID="a6b7624c-96ce-45d5-bc85-4549bc1c0988" containerID="5dca54e774d1c8690cd3878e26ee81de2824078f8e56e21bfc120572170c418c" exitCode=0 Nov 22 11:01:13 crc kubenswrapper[4938]: I1122 11:01:13.953820 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-f4dlk" event={"ID":"a6b7624c-96ce-45d5-bc85-4549bc1c0988","Type":"ContainerDied","Data":"5dca54e774d1c8690cd3878e26ee81de2824078f8e56e21bfc120572170c418c"} Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.217672 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.218004 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.315011 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.459005 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-scripts\") pod \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.459483 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-config-data\") pod \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.459969 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lj5mb\" (UniqueName: \"kubernetes.io/projected/a6b7624c-96ce-45d5-bc85-4549bc1c0988-kube-api-access-lj5mb\") pod \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.460102 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-combined-ca-bundle\") pod \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\" (UID: \"a6b7624c-96ce-45d5-bc85-4549bc1c0988\") " Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.470214 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6b7624c-96ce-45d5-bc85-4549bc1c0988-kube-api-access-lj5mb" (OuterVolumeSpecName: "kube-api-access-lj5mb") pod "a6b7624c-96ce-45d5-bc85-4549bc1c0988" (UID: "a6b7624c-96ce-45d5-bc85-4549bc1c0988"). InnerVolumeSpecName "kube-api-access-lj5mb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.470392 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-scripts" (OuterVolumeSpecName: "scripts") pod "a6b7624c-96ce-45d5-bc85-4549bc1c0988" (UID: "a6b7624c-96ce-45d5-bc85-4549bc1c0988"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.487018 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a6b7624c-96ce-45d5-bc85-4549bc1c0988" (UID: "a6b7624c-96ce-45d5-bc85-4549bc1c0988"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.505690 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-config-data" (OuterVolumeSpecName: "config-data") pod "a6b7624c-96ce-45d5-bc85-4549bc1c0988" (UID: "a6b7624c-96ce-45d5-bc85-4549bc1c0988"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.562436 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.562482 4938 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.562492 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6b7624c-96ce-45d5-bc85-4549bc1c0988-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.562502 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lj5mb\" (UniqueName: \"kubernetes.io/projected/a6b7624c-96ce-45d5-bc85-4549bc1c0988-kube-api-access-lj5mb\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.971830 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-f4dlk" event={"ID":"a6b7624c-96ce-45d5-bc85-4549bc1c0988","Type":"ContainerDied","Data":"428c55f463ae9ee85adb5964388f91849548c84de15adb1ddd79234e8b2632f9"} Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.972216 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="428c55f463ae9ee85adb5964388f91849548c84de15adb1ddd79234e8b2632f9" Nov 22 11:01:15 crc kubenswrapper[4938]: I1122 11:01:15.971882 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-f4dlk" Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.059767 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.060045 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8008daf9-4eb6-4514-8510-5a6b3053758c" containerName="nova-api-log" containerID="cri-o://5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf" gracePeriod=30 Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.060120 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8008daf9-4eb6-4514-8510-5a6b3053758c" containerName="nova-api-api" containerID="cri-o://545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587" gracePeriod=30 Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.067782 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8008daf9-4eb6-4514-8510-5a6b3053758c" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.201:8774/\": EOF" Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.067781 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8008daf9-4eb6-4514-8510-5a6b3053758c" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.201:8774/\": EOF" Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.075936 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.076409 4938 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/nova-scheduler-0" podUID="704acafb-fb16-45ae-b763-fabf037c4a90" containerName="nova-scheduler-scheduler" containerID="cri-o://fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73" gracePeriod=30 Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.094665 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.095242 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" containerName="nova-metadata-log" containerID="cri-o://b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad" gracePeriod=30 Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.094901 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" containerName="nova-metadata-metadata" containerID="cri-o://d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870" gracePeriod=30 Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.986912 4938 generic.go:334] "Generic (PLEG): container finished" podID="8008daf9-4eb6-4514-8510-5a6b3053758c" containerID="5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf" exitCode=143 Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.986964 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8008daf9-4eb6-4514-8510-5a6b3053758c","Type":"ContainerDied","Data":"5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf"} Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.989791 4938 generic.go:334] "Generic (PLEG): container finished" podID="768c8171-f2ed-498d-bba5-debf776d1a25" containerID="b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad" exitCode=143 Nov 22 11:01:16 crc kubenswrapper[4938]: I1122 11:01:16.989828 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"768c8171-f2ed-498d-bba5-debf776d1a25","Type":"ContainerDied","Data":"b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad"} Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.239253 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": read tcp 10.217.0.2:52908->10.217.0.193:8775: read: connection reset by peer" Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.239322 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": read tcp 10.217.0.2:52910->10.217.0.193:8775: read: connection reset by peer" Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.668971 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.837159 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sm4jz\" (UniqueName: \"kubernetes.io/projected/768c8171-f2ed-498d-bba5-debf776d1a25-kube-api-access-sm4jz\") pod \"768c8171-f2ed-498d-bba5-debf776d1a25\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.837550 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-config-data\") pod \"768c8171-f2ed-498d-bba5-debf776d1a25\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.837582 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/768c8171-f2ed-498d-bba5-debf776d1a25-logs\") pod \"768c8171-f2ed-498d-bba5-debf776d1a25\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.837697 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-nova-metadata-tls-certs\") pod \"768c8171-f2ed-498d-bba5-debf776d1a25\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.837712 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-combined-ca-bundle\") pod \"768c8171-f2ed-498d-bba5-debf776d1a25\" (UID: \"768c8171-f2ed-498d-bba5-debf776d1a25\") " Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.838263 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/768c8171-f2ed-498d-bba5-debf776d1a25-logs" (OuterVolumeSpecName: "logs") pod "768c8171-f2ed-498d-bba5-debf776d1a25" (UID: "768c8171-f2ed-498d-bba5-debf776d1a25"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.867009 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/768c8171-f2ed-498d-bba5-debf776d1a25-kube-api-access-sm4jz" (OuterVolumeSpecName: "kube-api-access-sm4jz") pod "768c8171-f2ed-498d-bba5-debf776d1a25" (UID: "768c8171-f2ed-498d-bba5-debf776d1a25"). InnerVolumeSpecName "kube-api-access-sm4jz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.884577 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "768c8171-f2ed-498d-bba5-debf776d1a25" (UID: "768c8171-f2ed-498d-bba5-debf776d1a25"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.900517 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "768c8171-f2ed-498d-bba5-debf776d1a25" (UID: "768c8171-f2ed-498d-bba5-debf776d1a25"). 
InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.909714 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-config-data" (OuterVolumeSpecName: "config-data") pod "768c8171-f2ed-498d-bba5-debf776d1a25" (UID: "768c8171-f2ed-498d-bba5-debf776d1a25"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.941655 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sm4jz\" (UniqueName: \"kubernetes.io/projected/768c8171-f2ed-498d-bba5-debf776d1a25-kube-api-access-sm4jz\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.941691 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.941703 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/768c8171-f2ed-498d-bba5-debf776d1a25-logs\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.941713 4938 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:19 crc kubenswrapper[4938]: I1122 11:01:19.941722 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/768c8171-f2ed-498d-bba5-debf776d1a25-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.020771 4938 generic.go:334] "Generic (PLEG): container finished" podID="768c8171-f2ed-498d-bba5-debf776d1a25" containerID="d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870" exitCode=0 Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.020823 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"768c8171-f2ed-498d-bba5-debf776d1a25","Type":"ContainerDied","Data":"d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870"} Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.020858 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"768c8171-f2ed-498d-bba5-debf776d1a25","Type":"ContainerDied","Data":"c3cf71930dfc481b9b3a5c67d4db6c4df7e08102bc87f48c01ad874f00bd6e79"} Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.020879 4938 scope.go:117] "RemoveContainer" containerID="d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.021075 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.063264 4938 scope.go:117] "RemoveContainer" containerID="b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.097635 4938 scope.go:117] "RemoveContainer" containerID="d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.101073 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:01:20 crc kubenswrapper[4938]: E1122 11:01:20.101174 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870\": container with ID starting with d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870 not found: ID does not exist" containerID="d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.101217 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870"} err="failed to get container status \"d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870\": rpc error: code = NotFound desc = could not find container \"d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870\": container with ID starting with d951cb39cb2ecac8dcc1e86794092f5584f74d04c0a8ec37ce9b6666f3168870 not found: ID does not exist" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.101251 4938 scope.go:117] "RemoveContainer" containerID="b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad" Nov 22 11:01:20 crc kubenswrapper[4938]: E1122 11:01:20.102563 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad\": container with ID starting with b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad not found: ID does not exist" containerID="b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.102609 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad"} err="failed to get container status \"b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad\": rpc error: code = NotFound desc = could not find container \"b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad\": container with ID starting with b5d63eb835da52c9fccd9ed022f26b6f91916f9083b67e29bde5d65ca453c3ad not found: ID does not exist" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.112191 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.119172 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:01:20 crc kubenswrapper[4938]: E1122 11:01:20.119771 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" containerName="nova-metadata-metadata" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.120494 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" 
containerName="nova-metadata-metadata" Nov 22 11:01:20 crc kubenswrapper[4938]: E1122 11:01:20.120529 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" containerName="nova-metadata-log" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.120594 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" containerName="nova-metadata-log" Nov 22 11:01:20 crc kubenswrapper[4938]: E1122 11:01:20.120663 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="413e9101-7406-46b5-b183-87c6b75b1ac8" containerName="init" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.120675 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="413e9101-7406-46b5-b183-87c6b75b1ac8" containerName="init" Nov 22 11:01:20 crc kubenswrapper[4938]: E1122 11:01:20.120752 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="413e9101-7406-46b5-b183-87c6b75b1ac8" containerName="dnsmasq-dns" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.120763 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="413e9101-7406-46b5-b183-87c6b75b1ac8" containerName="dnsmasq-dns" Nov 22 11:01:20 crc kubenswrapper[4938]: E1122 11:01:20.120783 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6b7624c-96ce-45d5-bc85-4549bc1c0988" containerName="nova-manage" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.120793 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6b7624c-96ce-45d5-bc85-4549bc1c0988" containerName="nova-manage" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.121178 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" containerName="nova-metadata-metadata" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.121200 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" containerName="nova-metadata-log" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.121242 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6b7624c-96ce-45d5-bc85-4549bc1c0988" containerName="nova-manage" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.121260 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="413e9101-7406-46b5-b183-87c6b75b1ac8" containerName="dnsmasq-dns" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.123758 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.125507 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.125720 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.130201 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.154823 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/209e9603-14ba-4706-87a0-00ea7f2bd737-config-data\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.154893 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/209e9603-14ba-4706-87a0-00ea7f2bd737-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.155027 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/209e9603-14ba-4706-87a0-00ea7f2bd737-logs\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.155057 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/209e9603-14ba-4706-87a0-00ea7f2bd737-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.155088 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9fpr\" (UniqueName: \"kubernetes.io/projected/209e9603-14ba-4706-87a0-00ea7f2bd737-kube-api-access-k9fpr\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.256011 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/209e9603-14ba-4706-87a0-00ea7f2bd737-config-data\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.256346 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/209e9603-14ba-4706-87a0-00ea7f2bd737-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.256373 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/209e9603-14ba-4706-87a0-00ea7f2bd737-logs\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc 
kubenswrapper[4938]: I1122 11:01:20.256398 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/209e9603-14ba-4706-87a0-00ea7f2bd737-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.256421 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9fpr\" (UniqueName: \"kubernetes.io/projected/209e9603-14ba-4706-87a0-00ea7f2bd737-kube-api-access-k9fpr\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.257023 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/209e9603-14ba-4706-87a0-00ea7f2bd737-logs\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.261433 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/209e9603-14ba-4706-87a0-00ea7f2bd737-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.261948 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/209e9603-14ba-4706-87a0-00ea7f2bd737-config-data\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.264673 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/209e9603-14ba-4706-87a0-00ea7f2bd737-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.285638 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9fpr\" (UniqueName: \"kubernetes.io/projected/209e9603-14ba-4706-87a0-00ea7f2bd737-kube-api-access-k9fpr\") pod \"nova-metadata-0\" (UID: \"209e9603-14ba-4706-87a0-00ea7f2bd737\") " pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.444997 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.460515 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="768c8171-f2ed-498d-bba5-debf776d1a25" path="/var/lib/kubelet/pods/768c8171-f2ed-498d-bba5-debf776d1a25/volumes" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.536171 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.661422 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2tdl\" (UniqueName: \"kubernetes.io/projected/704acafb-fb16-45ae-b763-fabf037c4a90-kube-api-access-x2tdl\") pod \"704acafb-fb16-45ae-b763-fabf037c4a90\" (UID: \"704acafb-fb16-45ae-b763-fabf037c4a90\") " Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.661879 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/704acafb-fb16-45ae-b763-fabf037c4a90-config-data\") pod \"704acafb-fb16-45ae-b763-fabf037c4a90\" (UID: \"704acafb-fb16-45ae-b763-fabf037c4a90\") " Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.661959 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/704acafb-fb16-45ae-b763-fabf037c4a90-combined-ca-bundle\") pod \"704acafb-fb16-45ae-b763-fabf037c4a90\" (UID: \"704acafb-fb16-45ae-b763-fabf037c4a90\") " Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.671536 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/704acafb-fb16-45ae-b763-fabf037c4a90-kube-api-access-x2tdl" (OuterVolumeSpecName: "kube-api-access-x2tdl") pod "704acafb-fb16-45ae-b763-fabf037c4a90" (UID: "704acafb-fb16-45ae-b763-fabf037c4a90"). InnerVolumeSpecName "kube-api-access-x2tdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.691798 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/704acafb-fb16-45ae-b763-fabf037c4a90-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "704acafb-fb16-45ae-b763-fabf037c4a90" (UID: "704acafb-fb16-45ae-b763-fabf037c4a90"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.692387 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/704acafb-fb16-45ae-b763-fabf037c4a90-config-data" (OuterVolumeSpecName: "config-data") pod "704acafb-fb16-45ae-b763-fabf037c4a90" (UID: "704acafb-fb16-45ae-b763-fabf037c4a90"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.764447 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2tdl\" (UniqueName: \"kubernetes.io/projected/704acafb-fb16-45ae-b763-fabf037c4a90-kube-api-access-x2tdl\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.764481 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/704acafb-fb16-45ae-b763-fabf037c4a90-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.764494 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/704acafb-fb16-45ae-b763-fabf037c4a90-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:20 crc kubenswrapper[4938]: I1122 11:01:20.879176 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:01:20 crc kubenswrapper[4938]: W1122 11:01:20.882656 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod209e9603_14ba_4706_87a0_00ea7f2bd737.slice/crio-4388f54a16854ac0069dcec78a0d040263359fc5987f8c98128138f01a72bd4a WatchSource:0}: Error finding container 4388f54a16854ac0069dcec78a0d040263359fc5987f8c98128138f01a72bd4a: Status 404 returned error can't find the container with id 4388f54a16854ac0069dcec78a0d040263359fc5987f8c98128138f01a72bd4a Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.034812 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"209e9603-14ba-4706-87a0-00ea7f2bd737","Type":"ContainerStarted","Data":"c88b95dc84af6342ac353c75b4b865f670d115aa7c2b02ff2aff2658ebaa3f94"} Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.034866 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"209e9603-14ba-4706-87a0-00ea7f2bd737","Type":"ContainerStarted","Data":"4388f54a16854ac0069dcec78a0d040263359fc5987f8c98128138f01a72bd4a"} Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.036524 4938 generic.go:334] "Generic (PLEG): container finished" podID="704acafb-fb16-45ae-b763-fabf037c4a90" containerID="fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73" exitCode=0 Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.036577 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"704acafb-fb16-45ae-b763-fabf037c4a90","Type":"ContainerDied","Data":"fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73"} Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.036633 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"704acafb-fb16-45ae-b763-fabf037c4a90","Type":"ContainerDied","Data":"bcfcc0a1b9aec2135cdf41ac09a9b44343a6fb049d092e4b14be422666aa423e"} Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.036651 4938 scope.go:117] "RemoveContainer" containerID="fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.036594 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.082014 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.089665 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.096826 4938 scope.go:117] "RemoveContainer" containerID="fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73" Nov 22 11:01:21 crc kubenswrapper[4938]: E1122 11:01:21.098297 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73\": container with ID starting with fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73 not found: ID does not exist" containerID="fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.099465 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73"} err="failed to get container status \"fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73\": rpc error: code = NotFound desc = could not find container \"fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73\": container with ID starting with fd146ab9d80b8468460045c0f32f9855e8b9d0d8325422a9bbeacd4731f38e73 not found: ID does not exist" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.101963 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:01:21 crc kubenswrapper[4938]: E1122 11:01:21.102585 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="704acafb-fb16-45ae-b763-fabf037c4a90" containerName="nova-scheduler-scheduler" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.102608 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="704acafb-fb16-45ae-b763-fabf037c4a90" containerName="nova-scheduler-scheduler" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.102839 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="704acafb-fb16-45ae-b763-fabf037c4a90" containerName="nova-scheduler-scheduler" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.103506 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.107327 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.113980 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.276717 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fb1a3ef-83a7-467e-9cd0-94310d410729-config-data\") pod \"nova-scheduler-0\" (UID: \"7fb1a3ef-83a7-467e-9cd0-94310d410729\") " pod="openstack/nova-scheduler-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.277974 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmvxq\" (UniqueName: \"kubernetes.io/projected/7fb1a3ef-83a7-467e-9cd0-94310d410729-kube-api-access-xmvxq\") pod \"nova-scheduler-0\" (UID: \"7fb1a3ef-83a7-467e-9cd0-94310d410729\") " pod="openstack/nova-scheduler-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.278148 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fb1a3ef-83a7-467e-9cd0-94310d410729-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7fb1a3ef-83a7-467e-9cd0-94310d410729\") " pod="openstack/nova-scheduler-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.380051 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fb1a3ef-83a7-467e-9cd0-94310d410729-config-data\") pod \"nova-scheduler-0\" (UID: \"7fb1a3ef-83a7-467e-9cd0-94310d410729\") " pod="openstack/nova-scheduler-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.380221 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmvxq\" (UniqueName: \"kubernetes.io/projected/7fb1a3ef-83a7-467e-9cd0-94310d410729-kube-api-access-xmvxq\") pod \"nova-scheduler-0\" (UID: \"7fb1a3ef-83a7-467e-9cd0-94310d410729\") " pod="openstack/nova-scheduler-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.380282 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fb1a3ef-83a7-467e-9cd0-94310d410729-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7fb1a3ef-83a7-467e-9cd0-94310d410729\") " pod="openstack/nova-scheduler-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.386131 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fb1a3ef-83a7-467e-9cd0-94310d410729-config-data\") pod \"nova-scheduler-0\" (UID: \"7fb1a3ef-83a7-467e-9cd0-94310d410729\") " pod="openstack/nova-scheduler-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.386200 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fb1a3ef-83a7-467e-9cd0-94310d410729-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7fb1a3ef-83a7-467e-9cd0-94310d410729\") " pod="openstack/nova-scheduler-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.395519 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmvxq\" (UniqueName: 
\"kubernetes.io/projected/7fb1a3ef-83a7-467e-9cd0-94310d410729-kube-api-access-xmvxq\") pod \"nova-scheduler-0\" (UID: \"7fb1a3ef-83a7-467e-9cd0-94310d410729\") " pod="openstack/nova-scheduler-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.432975 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.845326 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.851713 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:01:21 crc kubenswrapper[4938]: W1122 11:01:21.853069 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7fb1a3ef_83a7_467e_9cd0_94310d410729.slice/crio-c98a52eea39a9b63dc4b7870e27aa3fbf108076f22bfbb80aa2ad30644622d58 WatchSource:0}: Error finding container c98a52eea39a9b63dc4b7870e27aa3fbf108076f22bfbb80aa2ad30644622d58: Status 404 returned error can't find the container with id c98a52eea39a9b63dc4b7870e27aa3fbf108076f22bfbb80aa2ad30644622d58 Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.990945 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgqhx\" (UniqueName: \"kubernetes.io/projected/8008daf9-4eb6-4514-8510-5a6b3053758c-kube-api-access-rgqhx\") pod \"8008daf9-4eb6-4514-8510-5a6b3053758c\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.991049 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-internal-tls-certs\") pod \"8008daf9-4eb6-4514-8510-5a6b3053758c\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.991088 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-config-data\") pod \"8008daf9-4eb6-4514-8510-5a6b3053758c\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.991195 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8008daf9-4eb6-4514-8510-5a6b3053758c-logs\") pod \"8008daf9-4eb6-4514-8510-5a6b3053758c\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.991308 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-combined-ca-bundle\") pod \"8008daf9-4eb6-4514-8510-5a6b3053758c\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.991328 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-public-tls-certs\") pod \"8008daf9-4eb6-4514-8510-5a6b3053758c\" (UID: \"8008daf9-4eb6-4514-8510-5a6b3053758c\") " Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.992070 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/8008daf9-4eb6-4514-8510-5a6b3053758c-logs" (OuterVolumeSpecName: "logs") pod "8008daf9-4eb6-4514-8510-5a6b3053758c" (UID: "8008daf9-4eb6-4514-8510-5a6b3053758c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:01:21 crc kubenswrapper[4938]: I1122 11:01:21.998736 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8008daf9-4eb6-4514-8510-5a6b3053758c-kube-api-access-rgqhx" (OuterVolumeSpecName: "kube-api-access-rgqhx") pod "8008daf9-4eb6-4514-8510-5a6b3053758c" (UID: "8008daf9-4eb6-4514-8510-5a6b3053758c"). InnerVolumeSpecName "kube-api-access-rgqhx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.021709 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-config-data" (OuterVolumeSpecName: "config-data") pod "8008daf9-4eb6-4514-8510-5a6b3053758c" (UID: "8008daf9-4eb6-4514-8510-5a6b3053758c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.022067 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8008daf9-4eb6-4514-8510-5a6b3053758c" (UID: "8008daf9-4eb6-4514-8510-5a6b3053758c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.050141 4938 generic.go:334] "Generic (PLEG): container finished" podID="8008daf9-4eb6-4514-8510-5a6b3053758c" containerID="545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587" exitCode=0 Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.050190 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.050209 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8008daf9-4eb6-4514-8510-5a6b3053758c","Type":"ContainerDied","Data":"545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587"} Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.051604 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8008daf9-4eb6-4514-8510-5a6b3053758c","Type":"ContainerDied","Data":"35c77513942d8d0fd1dc6d89223ec799ba97462f0b3bd6d610bed6ee263f9a72"} Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.051654 4938 scope.go:117] "RemoveContainer" containerID="545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.053028 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7fb1a3ef-83a7-467e-9cd0-94310d410729","Type":"ContainerStarted","Data":"aa8767d8eab1227ef776f780e1abdbe3c68f2c07764ccc6477fba2f233d9047d"} Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.053067 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7fb1a3ef-83a7-467e-9cd0-94310d410729","Type":"ContainerStarted","Data":"c98a52eea39a9b63dc4b7870e27aa3fbf108076f22bfbb80aa2ad30644622d58"} Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.053099 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8008daf9-4eb6-4514-8510-5a6b3053758c" (UID: "8008daf9-4eb6-4514-8510-5a6b3053758c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.054740 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8008daf9-4eb6-4514-8510-5a6b3053758c" (UID: "8008daf9-4eb6-4514-8510-5a6b3053758c"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.055301 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"209e9603-14ba-4706-87a0-00ea7f2bd737","Type":"ContainerStarted","Data":"1bbaa7560c9452d7883e69e3ee5a2ff792b179c5d9f8d0b2da523f5d83d9cbb2"} Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.079756 4938 scope.go:117] "RemoveContainer" containerID="5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.080791 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.0807705969999999 podStartE2EDuration="1.080770597s" podCreationTimestamp="2025-11-22 11:01:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:01:22.073116977 +0000 UTC m=+1414.540954396" watchObservedRunningTime="2025-11-22 11:01:22.080770597 +0000 UTC m=+1414.548607996" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.094701 4938 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.094738 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.094749 4938 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8008daf9-4eb6-4514-8510-5a6b3053758c-logs\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.094759 4938 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.094770 4938 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8008daf9-4eb6-4514-8510-5a6b3053758c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.094780 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgqhx\" (UniqueName: \"kubernetes.io/projected/8008daf9-4eb6-4514-8510-5a6b3053758c-kube-api-access-rgqhx\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.098427 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.098404984 podStartE2EDuration="2.098404984s" podCreationTimestamp="2025-11-22 11:01:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:01:22.092491797 +0000 UTC m=+1414.560329206" watchObservedRunningTime="2025-11-22 11:01:22.098404984 +0000 UTC m=+1414.566242403" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.103691 4938 scope.go:117] "RemoveContainer" containerID="545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587" Nov 22 11:01:22 crc kubenswrapper[4938]: E1122 11:01:22.104371 4938 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587\": container with ID starting with 545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587 not found: ID does not exist" containerID="545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.104449 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587"} err="failed to get container status \"545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587\": rpc error: code = NotFound desc = could not find container \"545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587\": container with ID starting with 545ac7efc87b16e9834d8f01c051666c4438fa10cbdb10135271e7c9b95ec587 not found: ID does not exist" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.104485 4938 scope.go:117] "RemoveContainer" containerID="5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf" Nov 22 11:01:22 crc kubenswrapper[4938]: E1122 11:01:22.104969 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf\": container with ID starting with 5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf not found: ID does not exist" containerID="5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.105063 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf"} err="failed to get container status \"5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf\": rpc error: code = NotFound desc = could not find container \"5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf\": container with ID starting with 5e941ab3ad3499725b150e970976dbf88081d99507893c491be8e66bc4934caf not found: ID does not exist" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.403082 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.414851 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.427377 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 11:01:22 crc kubenswrapper[4938]: E1122 11:01:22.427975 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8008daf9-4eb6-4514-8510-5a6b3053758c" containerName="nova-api-api" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.428000 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8008daf9-4eb6-4514-8510-5a6b3053758c" containerName="nova-api-api" Nov 22 11:01:22 crc kubenswrapper[4938]: E1122 11:01:22.428036 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8008daf9-4eb6-4514-8510-5a6b3053758c" containerName="nova-api-log" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.428046 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="8008daf9-4eb6-4514-8510-5a6b3053758c" containerName="nova-api-log" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.428268 4938 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="8008daf9-4eb6-4514-8510-5a6b3053758c" containerName="nova-api-log" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.428305 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="8008daf9-4eb6-4514-8510-5a6b3053758c" containerName="nova-api-api" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.429618 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.431628 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.431633 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.432383 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.437302 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.461621 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="704acafb-fb16-45ae-b763-fabf037c4a90" path="/var/lib/kubelet/pods/704acafb-fb16-45ae-b763-fabf037c4a90/volumes" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.462194 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8008daf9-4eb6-4514-8510-5a6b3053758c" path="/var/lib/kubelet/pods/8008daf9-4eb6-4514-8510-5a6b3053758c/volumes" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.603960 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vg8v\" (UniqueName: \"kubernetes.io/projected/5b5cb627-ee85-42aa-95e9-ece522c218a4-kube-api-access-4vg8v\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.604049 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b5cb627-ee85-42aa-95e9-ece522c218a4-public-tls-certs\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.604178 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b5cb627-ee85-42aa-95e9-ece522c218a4-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.604275 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b5cb627-ee85-42aa-95e9-ece522c218a4-config-data\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.604299 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b5cb627-ee85-42aa-95e9-ece522c218a4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.604332 4938 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b5cb627-ee85-42aa-95e9-ece522c218a4-logs\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.706230 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vg8v\" (UniqueName: \"kubernetes.io/projected/5b5cb627-ee85-42aa-95e9-ece522c218a4-kube-api-access-4vg8v\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.706294 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b5cb627-ee85-42aa-95e9-ece522c218a4-public-tls-certs\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.706367 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b5cb627-ee85-42aa-95e9-ece522c218a4-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.706434 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b5cb627-ee85-42aa-95e9-ece522c218a4-config-data\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.706451 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b5cb627-ee85-42aa-95e9-ece522c218a4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.706476 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b5cb627-ee85-42aa-95e9-ece522c218a4-logs\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.707029 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b5cb627-ee85-42aa-95e9-ece522c218a4-logs\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.713566 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b5cb627-ee85-42aa-95e9-ece522c218a4-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.713566 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b5cb627-ee85-42aa-95e9-ece522c218a4-public-tls-certs\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.713836 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/5b5cb627-ee85-42aa-95e9-ece522c218a4-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.713960 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b5cb627-ee85-42aa-95e9-ece522c218a4-config-data\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.732551 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vg8v\" (UniqueName: \"kubernetes.io/projected/5b5cb627-ee85-42aa-95e9-ece522c218a4-kube-api-access-4vg8v\") pod \"nova-api-0\" (UID: \"5b5cb627-ee85-42aa-95e9-ece522c218a4\") " pod="openstack/nova-api-0" Nov 22 11:01:22 crc kubenswrapper[4938]: I1122 11:01:22.746645 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:01:23 crc kubenswrapper[4938]: I1122 11:01:23.180526 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:01:23 crc kubenswrapper[4938]: W1122 11:01:23.184065 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b5cb627_ee85_42aa_95e9_ece522c218a4.slice/crio-437a9242ca262f998c12d1dcd9599bcc916732433d9faaf9a937194075e4691c WatchSource:0}: Error finding container 437a9242ca262f998c12d1dcd9599bcc916732433d9faaf9a937194075e4691c: Status 404 returned error can't find the container with id 437a9242ca262f998c12d1dcd9599bcc916732433d9faaf9a937194075e4691c Nov 22 11:01:24 crc kubenswrapper[4938]: I1122 11:01:24.075187 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5b5cb627-ee85-42aa-95e9-ece522c218a4","Type":"ContainerStarted","Data":"88fd52decc0989bb294162ccd9cf429a5008a763024999ab1adad7f0d2b37228"} Nov 22 11:01:24 crc kubenswrapper[4938]: I1122 11:01:24.075759 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5b5cb627-ee85-42aa-95e9-ece522c218a4","Type":"ContainerStarted","Data":"b81a06158ba5219dfe849dea4aadf72bf140df0dc19bf9a21d5b1e297cdb91ec"} Nov 22 11:01:24 crc kubenswrapper[4938]: I1122 11:01:24.075774 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5b5cb627-ee85-42aa-95e9-ece522c218a4","Type":"ContainerStarted","Data":"437a9242ca262f998c12d1dcd9599bcc916732433d9faaf9a937194075e4691c"} Nov 22 11:01:24 crc kubenswrapper[4938]: I1122 11:01:24.105143 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.105124583 podStartE2EDuration="2.105124583s" podCreationTimestamp="2025-11-22 11:01:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:01:24.094696575 +0000 UTC m=+1416.562533984" watchObservedRunningTime="2025-11-22 11:01:24.105124583 +0000 UTC m=+1416.572961982" Nov 22 11:01:25 crc kubenswrapper[4938]: I1122 11:01:25.445833 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 11:01:25 crc kubenswrapper[4938]: I1122 11:01:25.446244 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 11:01:26 crc kubenswrapper[4938]: 
I1122 11:01:26.433134 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 22 11:01:30 crc kubenswrapper[4938]: I1122 11:01:30.445874 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 11:01:30 crc kubenswrapper[4938]: I1122 11:01:30.446461 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 11:01:31 crc kubenswrapper[4938]: I1122 11:01:31.433640 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 22 11:01:31 crc kubenswrapper[4938]: I1122 11:01:31.460196 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="209e9603-14ba-4706-87a0-00ea7f2bd737" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.203:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 11:01:31 crc kubenswrapper[4938]: I1122 11:01:31.460230 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="209e9603-14ba-4706-87a0-00ea7f2bd737" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.203:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 11:01:31 crc kubenswrapper[4938]: I1122 11:01:31.471423 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 22 11:01:32 crc kubenswrapper[4938]: I1122 11:01:32.170537 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 22 11:01:32 crc kubenswrapper[4938]: I1122 11:01:32.747663 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 11:01:32 crc kubenswrapper[4938]: I1122 11:01:32.747759 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 11:01:33 crc kubenswrapper[4938]: I1122 11:01:33.766101 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5b5cb627-ee85-42aa-95e9-ece522c218a4" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.205:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 11:01:33 crc kubenswrapper[4938]: I1122 11:01:33.766096 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5b5cb627-ee85-42aa-95e9-ece522c218a4" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.205:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 11:01:34 crc kubenswrapper[4938]: I1122 11:01:34.137115 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 22 11:01:40 crc kubenswrapper[4938]: I1122 11:01:40.461407 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 11:01:40 crc kubenswrapper[4938]: I1122 11:01:40.461876 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 11:01:40 crc kubenswrapper[4938]: I1122 11:01:40.471571 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 11:01:40 crc kubenswrapper[4938]: I1122 11:01:40.474810 4938 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 11:01:42 crc kubenswrapper[4938]: I1122 11:01:42.758594 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 11:01:42 crc kubenswrapper[4938]: I1122 11:01:42.759544 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 11:01:42 crc kubenswrapper[4938]: I1122 11:01:42.762318 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 11:01:42 crc kubenswrapper[4938]: I1122 11:01:42.765094 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 11:01:43 crc kubenswrapper[4938]: I1122 11:01:43.769186 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 11:01:43 crc kubenswrapper[4938]: I1122 11:01:43.777309 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 11:01:51 crc kubenswrapper[4938]: I1122 11:01:51.454364 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 11:01:52 crc kubenswrapper[4938]: I1122 11:01:52.954458 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 11:01:55 crc kubenswrapper[4938]: I1122 11:01:55.435261 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="c85fce90-13b6-40ad-a1ec-f0bf5168038e" containerName="rabbitmq" containerID="cri-o://17c6db99431b037bf08e4a0e783e917142cb0caf2d7ba06872fc681dbe0e72c4" gracePeriod=604797 Nov 22 11:01:56 crc kubenswrapper[4938]: I1122 11:01:56.893595 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="18ebf838-be34-4ba1-b8f0-031a5477ca78" containerName="rabbitmq" containerID="cri-o://4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040" gracePeriod=604797 Nov 22 11:02:01 crc kubenswrapper[4938]: I1122 11:02:01.961647 4938 generic.go:334] "Generic (PLEG): container finished" podID="c85fce90-13b6-40ad-a1ec-f0bf5168038e" containerID="17c6db99431b037bf08e4a0e783e917142cb0caf2d7ba06872fc681dbe0e72c4" exitCode=0 Nov 22 11:02:01 crc kubenswrapper[4938]: I1122 11:02:01.961753 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c85fce90-13b6-40ad-a1ec-f0bf5168038e","Type":"ContainerDied","Data":"17c6db99431b037bf08e4a0e783e917142cb0caf2d7ba06872fc681dbe0e72c4"} Nov 22 11:02:01 crc kubenswrapper[4938]: I1122 11:02:01.962244 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c85fce90-13b6-40ad-a1ec-f0bf5168038e","Type":"ContainerDied","Data":"a0edf03f75c3983853acde238cca29fecc048964579a3198fbc12bdd6fce816b"} Nov 22 11:02:01 crc kubenswrapper[4938]: I1122 11:02:01.962262 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0edf03f75c3983853acde238cca29fecc048964579a3198fbc12bdd6fce816b" Nov 22 11:02:01 crc kubenswrapper[4938]: I1122 11:02:01.995647 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.086483 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-erlang-cookie\") pod \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.086542 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.086619 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c85fce90-13b6-40ad-a1ec-f0bf5168038e-erlang-cookie-secret\") pod \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.086671 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-plugins\") pod \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.086758 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qp4n9\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-kube-api-access-qp4n9\") pod \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.086838 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-config-data\") pod \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.086934 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-plugins-conf\") pod \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.086990 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-tls\") pod \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.087023 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c85fce90-13b6-40ad-a1ec-f0bf5168038e-pod-info\") pod \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.087048 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-server-conf\") pod \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\" (UID: 
\"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.087098 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "c85fce90-13b6-40ad-a1ec-f0bf5168038e" (UID: "c85fce90-13b6-40ad-a1ec-f0bf5168038e"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.087112 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-confd\") pod \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\" (UID: \"c85fce90-13b6-40ad-a1ec-f0bf5168038e\") " Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.087128 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "c85fce90-13b6-40ad-a1ec-f0bf5168038e" (UID: "c85fce90-13b6-40ad-a1ec-f0bf5168038e"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.087954 4938 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.087976 4938 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.090555 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "c85fce90-13b6-40ad-a1ec-f0bf5168038e" (UID: "c85fce90-13b6-40ad-a1ec-f0bf5168038e"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.096818 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c85fce90-13b6-40ad-a1ec-f0bf5168038e-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "c85fce90-13b6-40ad-a1ec-f0bf5168038e" (UID: "c85fce90-13b6-40ad-a1ec-f0bf5168038e"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.097331 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "c85fce90-13b6-40ad-a1ec-f0bf5168038e" (UID: "c85fce90-13b6-40ad-a1ec-f0bf5168038e"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.101108 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/c85fce90-13b6-40ad-a1ec-f0bf5168038e-pod-info" (OuterVolumeSpecName: "pod-info") pod "c85fce90-13b6-40ad-a1ec-f0bf5168038e" (UID: "c85fce90-13b6-40ad-a1ec-f0bf5168038e"). 
InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.112110 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "persistence") pod "c85fce90-13b6-40ad-a1ec-f0bf5168038e" (UID: "c85fce90-13b6-40ad-a1ec-f0bf5168038e"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.119149 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-kube-api-access-qp4n9" (OuterVolumeSpecName: "kube-api-access-qp4n9") pod "c85fce90-13b6-40ad-a1ec-f0bf5168038e" (UID: "c85fce90-13b6-40ad-a1ec-f0bf5168038e"). InnerVolumeSpecName "kube-api-access-qp4n9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.135642 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-config-data" (OuterVolumeSpecName: "config-data") pod "c85fce90-13b6-40ad-a1ec-f0bf5168038e" (UID: "c85fce90-13b6-40ad-a1ec-f0bf5168038e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.153002 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-server-conf" (OuterVolumeSpecName: "server-conf") pod "c85fce90-13b6-40ad-a1ec-f0bf5168038e" (UID: "c85fce90-13b6-40ad-a1ec-f0bf5168038e"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.191256 4938 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c85fce90-13b6-40ad-a1ec-f0bf5168038e-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.191300 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qp4n9\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-kube-api-access-qp4n9\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.191315 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.191326 4938 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.191337 4938 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.191348 4938 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c85fce90-13b6-40ad-a1ec-f0bf5168038e-pod-info\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.191359 4938 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c85fce90-13b6-40ad-a1ec-f0bf5168038e-server-conf\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.191396 4938 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.229098 4938 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.240333 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "c85fce90-13b6-40ad-a1ec-f0bf5168038e" (UID: "c85fce90-13b6-40ad-a1ec-f0bf5168038e"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.292937 4938 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c85fce90-13b6-40ad-a1ec-f0bf5168038e-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.292971 4938 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:02 crc kubenswrapper[4938]: I1122 11:02:02.970347 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.005011 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.017306 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.026746 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 11:02:03 crc kubenswrapper[4938]: E1122 11:02:03.027211 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c85fce90-13b6-40ad-a1ec-f0bf5168038e" containerName="rabbitmq" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.027234 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c85fce90-13b6-40ad-a1ec-f0bf5168038e" containerName="rabbitmq" Nov 22 11:02:03 crc kubenswrapper[4938]: E1122 11:02:03.027263 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c85fce90-13b6-40ad-a1ec-f0bf5168038e" containerName="setup-container" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.027270 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c85fce90-13b6-40ad-a1ec-f0bf5168038e" containerName="setup-container" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.027476 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="c85fce90-13b6-40ad-a1ec-f0bf5168038e" containerName="rabbitmq" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.030842 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.033800 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.034453 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.034791 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.034958 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.035033 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-f846b" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.035926 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.036819 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.046167 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.107624 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/30956ae1-6658-45ca-867e-12fb808394db-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.107678 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-cqh5x\" (UniqueName: \"kubernetes.io/projected/30956ae1-6658-45ca-867e-12fb808394db-kube-api-access-cqh5x\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.107699 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/30956ae1-6658-45ca-867e-12fb808394db-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.107830 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/30956ae1-6658-45ca-867e-12fb808394db-server-conf\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.108047 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/30956ae1-6658-45ca-867e-12fb808394db-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.108136 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.108178 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/30956ae1-6658-45ca-867e-12fb808394db-config-data\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.108201 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/30956ae1-6658-45ca-867e-12fb808394db-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.108416 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/30956ae1-6658-45ca-867e-12fb808394db-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.108476 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/30956ae1-6658-45ca-867e-12fb808394db-pod-info\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.108613 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/30956ae1-6658-45ca-867e-12fb808394db-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.210858 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/30956ae1-6658-45ca-867e-12fb808394db-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.210962 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/30956ae1-6658-45ca-867e-12fb808394db-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.210986 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqh5x\" (UniqueName: \"kubernetes.io/projected/30956ae1-6658-45ca-867e-12fb808394db-kube-api-access-cqh5x\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.211006 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/30956ae1-6658-45ca-867e-12fb808394db-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.211045 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/30956ae1-6658-45ca-867e-12fb808394db-server-conf\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.211074 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/30956ae1-6658-45ca-867e-12fb808394db-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.211097 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.211113 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/30956ae1-6658-45ca-867e-12fb808394db-config-data\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.211126 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/30956ae1-6658-45ca-867e-12fb808394db-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.211186 
4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/30956ae1-6658-45ca-867e-12fb808394db-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.211207 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/30956ae1-6658-45ca-867e-12fb808394db-pod-info\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.212569 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.212884 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/30956ae1-6658-45ca-867e-12fb808394db-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.213196 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/30956ae1-6658-45ca-867e-12fb808394db-config-data\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.213205 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/30956ae1-6658-45ca-867e-12fb808394db-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.214170 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/30956ae1-6658-45ca-867e-12fb808394db-server-conf\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.215146 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/30956ae1-6658-45ca-867e-12fb808394db-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.216662 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/30956ae1-6658-45ca-867e-12fb808394db-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.216948 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/30956ae1-6658-45ca-867e-12fb808394db-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " 
pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.217687 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/30956ae1-6658-45ca-867e-12fb808394db-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.221972 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/30956ae1-6658-45ca-867e-12fb808394db-pod-info\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.243149 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqh5x\" (UniqueName: \"kubernetes.io/projected/30956ae1-6658-45ca-867e-12fb808394db-kube-api-access-cqh5x\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.263566 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"30956ae1-6658-45ca-867e-12fb808394db\") " pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.350257 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.424961 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.520643 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-erlang-cookie\") pod \"18ebf838-be34-4ba1-b8f0-031a5477ca78\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.520687 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-config-data\") pod \"18ebf838-be34-4ba1-b8f0-031a5477ca78\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.520755 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/18ebf838-be34-4ba1-b8f0-031a5477ca78-erlang-cookie-secret\") pod \"18ebf838-be34-4ba1-b8f0-031a5477ca78\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.520801 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/18ebf838-be34-4ba1-b8f0-031a5477ca78-pod-info\") pod \"18ebf838-be34-4ba1-b8f0-031a5477ca78\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.520819 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcb7l\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-kube-api-access-dcb7l\") pod 
\"18ebf838-be34-4ba1-b8f0-031a5477ca78\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.520851 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"18ebf838-be34-4ba1-b8f0-031a5477ca78\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.520903 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-plugins-conf\") pod \"18ebf838-be34-4ba1-b8f0-031a5477ca78\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.521009 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-tls\") pod \"18ebf838-be34-4ba1-b8f0-031a5477ca78\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.521062 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-confd\") pod \"18ebf838-be34-4ba1-b8f0-031a5477ca78\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.521080 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-server-conf\") pod \"18ebf838-be34-4ba1-b8f0-031a5477ca78\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.521097 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-plugins\") pod \"18ebf838-be34-4ba1-b8f0-031a5477ca78\" (UID: \"18ebf838-be34-4ba1-b8f0-031a5477ca78\") " Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.524788 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "18ebf838-be34-4ba1-b8f0-031a5477ca78" (UID: "18ebf838-be34-4ba1-b8f0-031a5477ca78"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.526333 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/18ebf838-be34-4ba1-b8f0-031a5477ca78-pod-info" (OuterVolumeSpecName: "pod-info") pod "18ebf838-be34-4ba1-b8f0-031a5477ca78" (UID: "18ebf838-be34-4ba1-b8f0-031a5477ca78"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.528243 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "18ebf838-be34-4ba1-b8f0-031a5477ca78" (UID: "18ebf838-be34-4ba1-b8f0-031a5477ca78"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.528551 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "18ebf838-be34-4ba1-b8f0-031a5477ca78" (UID: "18ebf838-be34-4ba1-b8f0-031a5477ca78"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.529055 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "18ebf838-be34-4ba1-b8f0-031a5477ca78" (UID: "18ebf838-be34-4ba1-b8f0-031a5477ca78"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.533575 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "18ebf838-be34-4ba1-b8f0-031a5477ca78" (UID: "18ebf838-be34-4ba1-b8f0-031a5477ca78"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.533713 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18ebf838-be34-4ba1-b8f0-031a5477ca78-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "18ebf838-be34-4ba1-b8f0-031a5477ca78" (UID: "18ebf838-be34-4ba1-b8f0-031a5477ca78"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.534972 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-kube-api-access-dcb7l" (OuterVolumeSpecName: "kube-api-access-dcb7l") pod "18ebf838-be34-4ba1-b8f0-031a5477ca78" (UID: "18ebf838-be34-4ba1-b8f0-031a5477ca78"). InnerVolumeSpecName "kube-api-access-dcb7l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.581089 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-config-data" (OuterVolumeSpecName: "config-data") pod "18ebf838-be34-4ba1-b8f0-031a5477ca78" (UID: "18ebf838-be34-4ba1-b8f0-031a5477ca78"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.612276 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-d558885bc-xvsgl"] Nov 22 11:02:03 crc kubenswrapper[4938]: E1122 11:02:03.612789 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18ebf838-be34-4ba1-b8f0-031a5477ca78" containerName="rabbitmq" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.612814 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="18ebf838-be34-4ba1-b8f0-031a5477ca78" containerName="rabbitmq" Nov 22 11:02:03 crc kubenswrapper[4938]: E1122 11:02:03.613795 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18ebf838-be34-4ba1-b8f0-031a5477ca78" containerName="setup-container" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.613873 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="18ebf838-be34-4ba1-b8f0-031a5477ca78" containerName="setup-container" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.614161 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="18ebf838-be34-4ba1-b8f0-031a5477ca78" containerName="rabbitmq" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.617868 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.620598 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.623032 4938 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.623063 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.623078 4938 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/18ebf838-be34-4ba1-b8f0-031a5477ca78-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.623091 4938 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/18ebf838-be34-4ba1-b8f0-031a5477ca78-pod-info\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.623105 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcb7l\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-kube-api-access-dcb7l\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.623131 4938 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.623143 4938 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.623154 4938 reconciler_common.go:293] "Volume detached for volume 
\"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.623164 4938 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.642490 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-xvsgl"] Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.651273 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-server-conf" (OuterVolumeSpecName: "server-conf") pod "18ebf838-be34-4ba1-b8f0-031a5477ca78" (UID: "18ebf838-be34-4ba1-b8f0-031a5477ca78"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.670345 4938 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.682641 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-xvsgl"] Nov 22 11:02:03 crc kubenswrapper[4938]: E1122 11:02:03.684076 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc dns-swift-storage-0 kube-api-access-w5swx openstack-edpm-ipam ovsdbserver-nb ovsdbserver-sb], unattached volumes=[], failed to process volumes=[config dns-svc dns-swift-storage-0 kube-api-access-w5swx openstack-edpm-ipam ovsdbserver-nb ovsdbserver-sb]: context canceled" pod="openstack/dnsmasq-dns-d558885bc-xvsgl" podUID="551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.725048 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-dns-svc\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.725108 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.725156 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.725190 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5swx\" (UniqueName: \"kubernetes.io/projected/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-kube-api-access-w5swx\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " 
pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.725214 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-config\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.725275 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.725326 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.725428 4938 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.725443 4938 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/18ebf838-be34-4ba1-b8f0-031a5477ca78-server-conf\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.740597 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "18ebf838-be34-4ba1-b8f0-031a5477ca78" (UID: "18ebf838-be34-4ba1-b8f0-031a5477ca78"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.788100 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-qqnx6"] Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.790099 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.821425 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-qqnx6"] Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.827941 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828001 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828023 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828052 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828084 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828109 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-config\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828152 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828174 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-dns-svc\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828197 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828215 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-openstack-edpm-ipam\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828239 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828252 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98f8f\" (UniqueName: \"kubernetes.io/projected/7b526fbb-fe31-4192-8756-67eaea9b813d-kube-api-access-98f8f\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828301 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5swx\" (UniqueName: \"kubernetes.io/projected/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-kube-api-access-w5swx\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828323 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-config\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.828373 4938 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/18ebf838-be34-4ba1-b8f0-031a5477ca78-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.829041 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-dns-swift-storage-0\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.832605 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-config\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.833243 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-openstack-edpm-ipam\") pod 
\"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.833765 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-ovsdbserver-sb\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.834356 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-ovsdbserver-nb\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.836487 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-dns-svc\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.872929 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5swx\" (UniqueName: \"kubernetes.io/projected/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-kube-api-access-w5swx\") pod \"dnsmasq-dns-d558885bc-xvsgl\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.913683 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.932884 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.932978 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-config\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.933172 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.933255 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.933290 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98f8f\" (UniqueName: 
\"kubernetes.io/projected/7b526fbb-fe31-4192-8756-67eaea9b813d-kube-api-access-98f8f\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.933370 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.933402 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.935536 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-dns-svc\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.936504 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-ovsdbserver-nb\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.947615 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-ovsdbserver-sb\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.948294 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-dns-swift-storage-0\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.948476 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-openstack-edpm-ipam\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.949013 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b526fbb-fe31-4192-8756-67eaea9b813d-config\") pod \"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.968264 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98f8f\" (UniqueName: \"kubernetes.io/projected/7b526fbb-fe31-4192-8756-67eaea9b813d-kube-api-access-98f8f\") pod 
\"dnsmasq-dns-78c64bc9c5-qqnx6\" (UID: \"7b526fbb-fe31-4192-8756-67eaea9b813d\") " pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.982970 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"30956ae1-6658-45ca-867e-12fb808394db","Type":"ContainerStarted","Data":"fa7266ccdabdaf4acba3ce1db51c7388f69677fd116f2fef9f2f04ca61da6a84"} Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.986844 4938 generic.go:334] "Generic (PLEG): container finished" podID="18ebf838-be34-4ba1-b8f0-031a5477ca78" containerID="4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040" exitCode=0 Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.986971 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.987874 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.988987 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"18ebf838-be34-4ba1-b8f0-031a5477ca78","Type":"ContainerDied","Data":"4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040"} Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.989397 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"18ebf838-be34-4ba1-b8f0-031a5477ca78","Type":"ContainerDied","Data":"899bad9c334e83c81503618b39d5978b3b51e65bb0e20c31e969cf3b93a0e44d"} Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.989516 4938 scope.go:117] "RemoveContainer" containerID="4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040" Nov 22 11:02:03 crc kubenswrapper[4938]: I1122 11:02:03.997955 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.001398 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.028746 4938 scope.go:117] "RemoveContainer" containerID="a458a922a4d183ce4cbc3967e62865f46c22904f37a18ec220c88cbfbbec3652" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.034421 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-dns-svc\") pod \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.034521 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-ovsdbserver-sb\") pod \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.034546 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-dns-swift-storage-0\") pod \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.034626 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-openstack-edpm-ipam\") pod \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.034820 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-ovsdbserver-nb\") pod \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.034899 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e" (UID: "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.034970 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-config\") pod \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.035048 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5swx\" (UniqueName: \"kubernetes.io/projected/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-kube-api-access-w5swx\") pod \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\" (UID: \"551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e\") " Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.035556 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e" (UID: "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.035567 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e" (UID: "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.036069 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e" (UID: "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.036151 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.036184 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.037042 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e" (UID: "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.037302 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-config" (OuterVolumeSpecName: "config") pod "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e" (UID: "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.050905 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-kube-api-access-w5swx" (OuterVolumeSpecName: "kube-api-access-w5swx") pod "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e" (UID: "551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e"). InnerVolumeSpecName "kube-api-access-w5swx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.074228 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.089597 4938 scope.go:117] "RemoveContainer" containerID="4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040" Nov 22 11:02:04 crc kubenswrapper[4938]: E1122 11:02:04.092089 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040\": container with ID starting with 4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040 not found: ID does not exist" containerID="4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.092133 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040"} err="failed to get container status \"4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040\": rpc error: code = NotFound desc = could not find container \"4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040\": container with ID starting with 4403b6cb586d0dc345d7d9b14b8ea140db4d56c010c0a12ea6319769bf171040 not found: ID does not exist" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.092161 4938 scope.go:117] "RemoveContainer" containerID="a458a922a4d183ce4cbc3967e62865f46c22904f37a18ec220c88cbfbbec3652" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.096677 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 11:02:04 crc kubenswrapper[4938]: E1122 11:02:04.097104 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a458a922a4d183ce4cbc3967e62865f46c22904f37a18ec220c88cbfbbec3652\": container with ID starting with a458a922a4d183ce4cbc3967e62865f46c22904f37a18ec220c88cbfbbec3652 not found: ID does not exist" containerID="a458a922a4d183ce4cbc3967e62865f46c22904f37a18ec220c88cbfbbec3652" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.097143 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a458a922a4d183ce4cbc3967e62865f46c22904f37a18ec220c88cbfbbec3652"} err="failed to get container status \"a458a922a4d183ce4cbc3967e62865f46c22904f37a18ec220c88cbfbbec3652\": rpc error: code = NotFound desc = could not find container \"a458a922a4d183ce4cbc3967e62865f46c22904f37a18ec220c88cbfbbec3652\": container with ID starting with a458a922a4d183ce4cbc3967e62865f46c22904f37a18ec220c88cbfbbec3652 not found: ID does not exist" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.121688 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.123512 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.130122 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.130163 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.130352 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.130399 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.130465 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.130560 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.131196 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.131357 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-g42d6" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.139848 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-config\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.139882 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5swx\" (UniqueName: \"kubernetes.io/projected/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-kube-api-access-w5swx\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.139899 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.139954 4938 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.139968 4938 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.242928 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fc5cb0aa-c3a0-436c-b911-6029b94775a8-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.242982 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fc5cb0aa-c3a0-436c-b911-6029b94775a8-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.243021 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ml5t\" (UniqueName: \"kubernetes.io/projected/fc5cb0aa-c3a0-436c-b911-6029b94775a8-kube-api-access-8ml5t\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.243067 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fc5cb0aa-c3a0-436c-b911-6029b94775a8-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.243108 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fc5cb0aa-c3a0-436c-b911-6029b94775a8-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.243130 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fc5cb0aa-c3a0-436c-b911-6029b94775a8-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.243145 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fc5cb0aa-c3a0-436c-b911-6029b94775a8-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.243161 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fc5cb0aa-c3a0-436c-b911-6029b94775a8-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.243196 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.243233 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fc5cb0aa-c3a0-436c-b911-6029b94775a8-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.243277 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fc5cb0aa-c3a0-436c-b911-6029b94775a8-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.344661 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fc5cb0aa-c3a0-436c-b911-6029b94775a8-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.344716 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fc5cb0aa-c3a0-436c-b911-6029b94775a8-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.344735 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fc5cb0aa-c3a0-436c-b911-6029b94775a8-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.344755 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fc5cb0aa-c3a0-436c-b911-6029b94775a8-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.344801 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.344836 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fc5cb0aa-c3a0-436c-b911-6029b94775a8-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.344882 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fc5cb0aa-c3a0-436c-b911-6029b94775a8-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.344935 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fc5cb0aa-c3a0-436c-b911-6029b94775a8-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.344961 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fc5cb0aa-c3a0-436c-b911-6029b94775a8-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.344998 4938 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-8ml5t\" (UniqueName: \"kubernetes.io/projected/fc5cb0aa-c3a0-436c-b911-6029b94775a8-kube-api-access-8ml5t\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.345022 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fc5cb0aa-c3a0-436c-b911-6029b94775a8-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.345179 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.346530 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fc5cb0aa-c3a0-436c-b911-6029b94775a8-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.347353 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fc5cb0aa-c3a0-436c-b911-6029b94775a8-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.348761 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fc5cb0aa-c3a0-436c-b911-6029b94775a8-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.352665 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fc5cb0aa-c3a0-436c-b911-6029b94775a8-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.352781 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fc5cb0aa-c3a0-436c-b911-6029b94775a8-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.352958 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fc5cb0aa-c3a0-436c-b911-6029b94775a8-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.360533 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/fc5cb0aa-c3a0-436c-b911-6029b94775a8-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.361058 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fc5cb0aa-c3a0-436c-b911-6029b94775a8-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.363429 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fc5cb0aa-c3a0-436c-b911-6029b94775a8-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.364025 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ml5t\" (UniqueName: \"kubernetes.io/projected/fc5cb0aa-c3a0-436c-b911-6029b94775a8-kube-api-access-8ml5t\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.383338 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"fc5cb0aa-c3a0-436c-b911-6029b94775a8\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.448164 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.461618 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18ebf838-be34-4ba1-b8f0-031a5477ca78" path="/var/lib/kubelet/pods/18ebf838-be34-4ba1-b8f0-031a5477ca78/volumes" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.462643 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c85fce90-13b6-40ad-a1ec-f0bf5168038e" path="/var/lib/kubelet/pods/c85fce90-13b6-40ad-a1ec-f0bf5168038e/volumes" Nov 22 11:02:04 crc kubenswrapper[4938]: I1122 11:02:04.503123 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78c64bc9c5-qqnx6"] Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:04.898669 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 11:02:05 crc kubenswrapper[4938]: W1122 11:02:04.902431 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfc5cb0aa_c3a0_436c_b911_6029b94775a8.slice/crio-9646ef3907b883b4fe8a55cc712cb19394a1d88a9fac4bf776fd0e3f7d901b2d WatchSource:0}: Error finding container 9646ef3907b883b4fe8a55cc712cb19394a1d88a9fac4bf776fd0e3f7d901b2d: Status 404 returned error can't find the container with id 9646ef3907b883b4fe8a55cc712cb19394a1d88a9fac4bf776fd0e3f7d901b2d Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:04.999429 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"fc5cb0aa-c3a0-436c-b911-6029b94775a8","Type":"ContainerStarted","Data":"9646ef3907b883b4fe8a55cc712cb19394a1d88a9fac4bf776fd0e3f7d901b2d"} Nov 22 11:02:05 crc 
kubenswrapper[4938]: I1122 11:02:05.001328 4938 generic.go:334] "Generic (PLEG): container finished" podID="7b526fbb-fe31-4192-8756-67eaea9b813d" containerID="6eb16a31b7d4c760d622497b7caa165f0c8107561642de0df96da1b0cb99f412" exitCode=0 Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.001460 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d558885bc-xvsgl" Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.002129 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" event={"ID":"7b526fbb-fe31-4192-8756-67eaea9b813d","Type":"ContainerDied","Data":"6eb16a31b7d4c760d622497b7caa165f0c8107561642de0df96da1b0cb99f412"} Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.002194 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" event={"ID":"7b526fbb-fe31-4192-8756-67eaea9b813d","Type":"ContainerStarted","Data":"5b373e417b49fe60af8edf29e17beb5d31ca0ab7502d8471850a60aecdb34fa3"} Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.303711 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rclnq"] Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.307858 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.337664 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rclnq"] Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.365869 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc6ccd12-b009-46c3-ab37-70756536c540-utilities\") pod \"redhat-operators-rclnq\" (UID: \"dc6ccd12-b009-46c3-ab37-70756536c540\") " pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.369231 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhpff\" (UniqueName: \"kubernetes.io/projected/dc6ccd12-b009-46c3-ab37-70756536c540-kube-api-access-bhpff\") pod \"redhat-operators-rclnq\" (UID: \"dc6ccd12-b009-46c3-ab37-70756536c540\") " pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.369295 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc6ccd12-b009-46c3-ab37-70756536c540-catalog-content\") pod \"redhat-operators-rclnq\" (UID: \"dc6ccd12-b009-46c3-ab37-70756536c540\") " pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.471645 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc6ccd12-b009-46c3-ab37-70756536c540-utilities\") pod \"redhat-operators-rclnq\" (UID: \"dc6ccd12-b009-46c3-ab37-70756536c540\") " pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.471728 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhpff\" (UniqueName: \"kubernetes.io/projected/dc6ccd12-b009-46c3-ab37-70756536c540-kube-api-access-bhpff\") pod \"redhat-operators-rclnq\" (UID: \"dc6ccd12-b009-46c3-ab37-70756536c540\") " 
pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.471750 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc6ccd12-b009-46c3-ab37-70756536c540-catalog-content\") pod \"redhat-operators-rclnq\" (UID: \"dc6ccd12-b009-46c3-ab37-70756536c540\") " pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.472576 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc6ccd12-b009-46c3-ab37-70756536c540-catalog-content\") pod \"redhat-operators-rclnq\" (UID: \"dc6ccd12-b009-46c3-ab37-70756536c540\") " pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.472797 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc6ccd12-b009-46c3-ab37-70756536c540-utilities\") pod \"redhat-operators-rclnq\" (UID: \"dc6ccd12-b009-46c3-ab37-70756536c540\") " pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.491961 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhpff\" (UniqueName: \"kubernetes.io/projected/dc6ccd12-b009-46c3-ab37-70756536c540-kube-api-access-bhpff\") pod \"redhat-operators-rclnq\" (UID: \"dc6ccd12-b009-46c3-ab37-70756536c540\") " pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.580494 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.594616 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-xvsgl"] Nov 22 11:02:05 crc kubenswrapper[4938]: I1122 11:02:05.604090 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-d558885bc-xvsgl"] Nov 22 11:02:06 crc kubenswrapper[4938]: I1122 11:02:06.012634 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" event={"ID":"7b526fbb-fe31-4192-8756-67eaea9b813d","Type":"ContainerStarted","Data":"56dba75e87d62ad42cfd71c7d39330e9fd36596b86eaccf4bfeedab63d81089e"} Nov 22 11:02:06 crc kubenswrapper[4938]: I1122 11:02:06.012966 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:06 crc kubenswrapper[4938]: I1122 11:02:06.015145 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"30956ae1-6658-45ca-867e-12fb808394db","Type":"ContainerStarted","Data":"904c1ba783a8d6b983a26c7622a1649a647aa4abdcf90fc93c04fca34ca2de2b"} Nov 22 11:02:06 crc kubenswrapper[4938]: W1122 11:02:06.023558 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc6ccd12_b009_46c3_ab37_70756536c540.slice/crio-bc727efd021ee4cb711141c8749784fe311588ec2604a78b30a7be3f97984375 WatchSource:0}: Error finding container bc727efd021ee4cb711141c8749784fe311588ec2604a78b30a7be3f97984375: Status 404 returned error can't find the container with id bc727efd021ee4cb711141c8749784fe311588ec2604a78b30a7be3f97984375 Nov 22 11:02:06 crc kubenswrapper[4938]: I1122 11:02:06.027047 4938 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-marketplace/redhat-operators-rclnq"] Nov 22 11:02:06 crc kubenswrapper[4938]: I1122 11:02:06.041358 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" podStartSLOduration=3.041340704 podStartE2EDuration="3.041340704s" podCreationTimestamp="2025-11-22 11:02:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:02:06.034519265 +0000 UTC m=+1458.502356664" watchObservedRunningTime="2025-11-22 11:02:06.041340704 +0000 UTC m=+1458.509178103" Nov 22 11:02:06 crc kubenswrapper[4938]: I1122 11:02:06.458394 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e" path="/var/lib/kubelet/pods/551d335c-ce35-4fb2-9ca1-7ffb0b0c3c2e/volumes" Nov 22 11:02:07 crc kubenswrapper[4938]: I1122 11:02:07.024882 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"fc5cb0aa-c3a0-436c-b911-6029b94775a8","Type":"ContainerStarted","Data":"289a9871294df2e90f5eb7b67e834dc0935b78959c412b91228e904edd999bb2"} Nov 22 11:02:07 crc kubenswrapper[4938]: I1122 11:02:07.026952 4938 generic.go:334] "Generic (PLEG): container finished" podID="dc6ccd12-b009-46c3-ab37-70756536c540" containerID="638d5d804350f4a8eb60631c55782fab5647c0c2fb8f7ab6ca4c64a19a2980f1" exitCode=0 Nov 22 11:02:07 crc kubenswrapper[4938]: I1122 11:02:07.027009 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rclnq" event={"ID":"dc6ccd12-b009-46c3-ab37-70756536c540","Type":"ContainerDied","Data":"638d5d804350f4a8eb60631c55782fab5647c0c2fb8f7ab6ca4c64a19a2980f1"} Nov 22 11:02:07 crc kubenswrapper[4938]: I1122 11:02:07.027044 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rclnq" event={"ID":"dc6ccd12-b009-46c3-ab37-70756536c540","Type":"ContainerStarted","Data":"bc727efd021ee4cb711141c8749784fe311588ec2604a78b30a7be3f97984375"} Nov 22 11:02:08 crc kubenswrapper[4938]: I1122 11:02:08.037242 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rclnq" event={"ID":"dc6ccd12-b009-46c3-ab37-70756536c540","Type":"ContainerStarted","Data":"3221fe1fdedea846ed028a613a757510e84cdaef9758b681161e68fb006e3a31"} Nov 22 11:02:09 crc kubenswrapper[4938]: I1122 11:02:09.064798 4938 generic.go:334] "Generic (PLEG): container finished" podID="dc6ccd12-b009-46c3-ab37-70756536c540" containerID="3221fe1fdedea846ed028a613a757510e84cdaef9758b681161e68fb006e3a31" exitCode=0 Nov 22 11:02:09 crc kubenswrapper[4938]: I1122 11:02:09.064851 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rclnq" event={"ID":"dc6ccd12-b009-46c3-ab37-70756536c540","Type":"ContainerDied","Data":"3221fe1fdedea846ed028a613a757510e84cdaef9758b681161e68fb006e3a31"} Nov 22 11:02:10 crc kubenswrapper[4938]: I1122 11:02:10.077441 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rclnq" event={"ID":"dc6ccd12-b009-46c3-ab37-70756536c540","Type":"ContainerStarted","Data":"b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244"} Nov 22 11:02:10 crc kubenswrapper[4938]: I1122 11:02:10.099604 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rclnq" podStartSLOduration=2.5320342780000002 
podStartE2EDuration="5.099586307s" podCreationTimestamp="2025-11-22 11:02:05 +0000 UTC" firstStartedPulling="2025-11-22 11:02:07.028417954 +0000 UTC m=+1459.496255353" lastFinishedPulling="2025-11-22 11:02:09.595969983 +0000 UTC m=+1462.063807382" observedRunningTime="2025-11-22 11:02:10.09567213 +0000 UTC m=+1462.563509529" watchObservedRunningTime="2025-11-22 11:02:10.099586307 +0000 UTC m=+1462.567423706" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.000083 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78c64bc9c5-qqnx6" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.075858 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-8cftt"] Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.076145 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" podUID="58b89450-2eae-4604-b2e3-eb29ab66f574" containerName="dnsmasq-dns" containerID="cri-o://a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d" gracePeriod=10 Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.538659 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.732419 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftqft\" (UniqueName: \"kubernetes.io/projected/58b89450-2eae-4604-b2e3-eb29ab66f574-kube-api-access-ftqft\") pod \"58b89450-2eae-4604-b2e3-eb29ab66f574\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.732483 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-dns-svc\") pod \"58b89450-2eae-4604-b2e3-eb29ab66f574\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.732564 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-ovsdbserver-nb\") pod \"58b89450-2eae-4604-b2e3-eb29ab66f574\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.732625 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-dns-swift-storage-0\") pod \"58b89450-2eae-4604-b2e3-eb29ab66f574\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.732723 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-config\") pod \"58b89450-2eae-4604-b2e3-eb29ab66f574\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.732775 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-ovsdbserver-sb\") pod \"58b89450-2eae-4604-b2e3-eb29ab66f574\" (UID: \"58b89450-2eae-4604-b2e3-eb29ab66f574\") " Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.745383 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/projected/58b89450-2eae-4604-b2e3-eb29ab66f574-kube-api-access-ftqft" (OuterVolumeSpecName: "kube-api-access-ftqft") pod "58b89450-2eae-4604-b2e3-eb29ab66f574" (UID: "58b89450-2eae-4604-b2e3-eb29ab66f574"). InnerVolumeSpecName "kube-api-access-ftqft". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.792755 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "58b89450-2eae-4604-b2e3-eb29ab66f574" (UID: "58b89450-2eae-4604-b2e3-eb29ab66f574"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.800769 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "58b89450-2eae-4604-b2e3-eb29ab66f574" (UID: "58b89450-2eae-4604-b2e3-eb29ab66f574"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.804159 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "58b89450-2eae-4604-b2e3-eb29ab66f574" (UID: "58b89450-2eae-4604-b2e3-eb29ab66f574"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.811776 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-config" (OuterVolumeSpecName: "config") pod "58b89450-2eae-4604-b2e3-eb29ab66f574" (UID: "58b89450-2eae-4604-b2e3-eb29ab66f574"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.816949 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "58b89450-2eae-4604-b2e3-eb29ab66f574" (UID: "58b89450-2eae-4604-b2e3-eb29ab66f574"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.834863 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftqft\" (UniqueName: \"kubernetes.io/projected/58b89450-2eae-4604-b2e3-eb29ab66f574-kube-api-access-ftqft\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.835081 4938 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.835150 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.835200 4938 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.835247 4938 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-config\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:14 crc kubenswrapper[4938]: I1122 11:02:14.835298 4938 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58b89450-2eae-4604-b2e3-eb29ab66f574-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.129025 4938 generic.go:334] "Generic (PLEG): container finished" podID="58b89450-2eae-4604-b2e3-eb29ab66f574" containerID="a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d" exitCode=0 Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.129072 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.129099 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" event={"ID":"58b89450-2eae-4604-b2e3-eb29ab66f574","Type":"ContainerDied","Data":"a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d"} Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.129203 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-8cftt" event={"ID":"58b89450-2eae-4604-b2e3-eb29ab66f574","Type":"ContainerDied","Data":"34e46163c88abb45b1aead8df895bed451ea34938e1acbf9f664bf44d766c0ac"} Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.129228 4938 scope.go:117] "RemoveContainer" containerID="a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d" Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.160813 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-8cftt"] Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.163818 4938 scope.go:117] "RemoveContainer" containerID="364b645ff49420722d8b62540356a6e0de85baa13955237a64db156f2b912ef2" Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.169509 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-8cftt"] Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.203801 4938 scope.go:117] "RemoveContainer" containerID="a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d" Nov 22 11:02:15 crc kubenswrapper[4938]: E1122 11:02:15.204593 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d\": container with ID starting with a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d not found: ID does not exist" containerID="a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d" Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.204638 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d"} err="failed to get container status \"a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d\": rpc error: code = NotFound desc = could not find container \"a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d\": container with ID starting with a527fd0a0b0050fc0275623c7a461a4d428fbfaafea31cfe3a169ae5b5522f0d not found: ID does not exist" Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.204667 4938 scope.go:117] "RemoveContainer" containerID="364b645ff49420722d8b62540356a6e0de85baa13955237a64db156f2b912ef2" Nov 22 11:02:15 crc kubenswrapper[4938]: E1122 11:02:15.205282 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"364b645ff49420722d8b62540356a6e0de85baa13955237a64db156f2b912ef2\": container with ID starting with 364b645ff49420722d8b62540356a6e0de85baa13955237a64db156f2b912ef2 not found: ID does not exist" containerID="364b645ff49420722d8b62540356a6e0de85baa13955237a64db156f2b912ef2" Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.205336 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"364b645ff49420722d8b62540356a6e0de85baa13955237a64db156f2b912ef2"} err="failed to get container status 
\"364b645ff49420722d8b62540356a6e0de85baa13955237a64db156f2b912ef2\": rpc error: code = NotFound desc = could not find container \"364b645ff49420722d8b62540356a6e0de85baa13955237a64db156f2b912ef2\": container with ID starting with 364b645ff49420722d8b62540356a6e0de85baa13955237a64db156f2b912ef2 not found: ID does not exist" Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.581126 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.581444 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:15 crc kubenswrapper[4938]: I1122 11:02:15.627809 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:16 crc kubenswrapper[4938]: I1122 11:02:16.191045 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:16 crc kubenswrapper[4938]: I1122 11:02:16.243802 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rclnq"] Nov 22 11:02:16 crc kubenswrapper[4938]: I1122 11:02:16.471674 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58b89450-2eae-4604-b2e3-eb29ab66f574" path="/var/lib/kubelet/pods/58b89450-2eae-4604-b2e3-eb29ab66f574/volumes" Nov 22 11:02:18 crc kubenswrapper[4938]: I1122 11:02:18.154982 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rclnq" podUID="dc6ccd12-b009-46c3-ab37-70756536c540" containerName="registry-server" containerID="cri-o://b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244" gracePeriod=2 Nov 22 11:02:18 crc kubenswrapper[4938]: I1122 11:02:18.603492 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:18 crc kubenswrapper[4938]: I1122 11:02:18.608298 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc6ccd12-b009-46c3-ab37-70756536c540-catalog-content\") pod \"dc6ccd12-b009-46c3-ab37-70756536c540\" (UID: \"dc6ccd12-b009-46c3-ab37-70756536c540\") " Nov 22 11:02:18 crc kubenswrapper[4938]: I1122 11:02:18.610211 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhpff\" (UniqueName: \"kubernetes.io/projected/dc6ccd12-b009-46c3-ab37-70756536c540-kube-api-access-bhpff\") pod \"dc6ccd12-b009-46c3-ab37-70756536c540\" (UID: \"dc6ccd12-b009-46c3-ab37-70756536c540\") " Nov 22 11:02:18 crc kubenswrapper[4938]: I1122 11:02:18.610460 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc6ccd12-b009-46c3-ab37-70756536c540-utilities\") pod \"dc6ccd12-b009-46c3-ab37-70756536c540\" (UID: \"dc6ccd12-b009-46c3-ab37-70756536c540\") " Nov 22 11:02:18 crc kubenswrapper[4938]: I1122 11:02:18.611443 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc6ccd12-b009-46c3-ab37-70756536c540-utilities" (OuterVolumeSpecName: "utilities") pod "dc6ccd12-b009-46c3-ab37-70756536c540" (UID: "dc6ccd12-b009-46c3-ab37-70756536c540"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:02:18 crc kubenswrapper[4938]: I1122 11:02:18.615353 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc6ccd12-b009-46c3-ab37-70756536c540-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:18 crc kubenswrapper[4938]: I1122 11:02:18.618711 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc6ccd12-b009-46c3-ab37-70756536c540-kube-api-access-bhpff" (OuterVolumeSpecName: "kube-api-access-bhpff") pod "dc6ccd12-b009-46c3-ab37-70756536c540" (UID: "dc6ccd12-b009-46c3-ab37-70756536c540"). InnerVolumeSpecName "kube-api-access-bhpff". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:02:18 crc kubenswrapper[4938]: I1122 11:02:18.687644 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc6ccd12-b009-46c3-ab37-70756536c540-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dc6ccd12-b009-46c3-ab37-70756536c540" (UID: "dc6ccd12-b009-46c3-ab37-70756536c540"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:02:18 crc kubenswrapper[4938]: I1122 11:02:18.717490 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc6ccd12-b009-46c3-ab37-70756536c540-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:18 crc kubenswrapper[4938]: I1122 11:02:18.717535 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhpff\" (UniqueName: \"kubernetes.io/projected/dc6ccd12-b009-46c3-ab37-70756536c540-kube-api-access-bhpff\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.166123 4938 generic.go:334] "Generic (PLEG): container finished" podID="dc6ccd12-b009-46c3-ab37-70756536c540" containerID="b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244" exitCode=0 Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.166195 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rclnq" event={"ID":"dc6ccd12-b009-46c3-ab37-70756536c540","Type":"ContainerDied","Data":"b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244"} Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.166251 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rclnq" event={"ID":"dc6ccd12-b009-46c3-ab37-70756536c540","Type":"ContainerDied","Data":"bc727efd021ee4cb711141c8749784fe311588ec2604a78b30a7be3f97984375"} Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.166277 4938 scope.go:117] "RemoveContainer" containerID="b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244" Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.166277 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rclnq" Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.196096 4938 scope.go:117] "RemoveContainer" containerID="3221fe1fdedea846ed028a613a757510e84cdaef9758b681161e68fb006e3a31" Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.201251 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rclnq"] Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.212131 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rclnq"] Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.238234 4938 scope.go:117] "RemoveContainer" containerID="638d5d804350f4a8eb60631c55782fab5647c0c2fb8f7ab6ca4c64a19a2980f1" Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.263742 4938 scope.go:117] "RemoveContainer" containerID="b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244" Nov 22 11:02:19 crc kubenswrapper[4938]: E1122 11:02:19.264089 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244\": container with ID starting with b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244 not found: ID does not exist" containerID="b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244" Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.264121 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244"} err="failed to get container status \"b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244\": rpc error: code = NotFound desc = could not find container \"b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244\": container with ID starting with b07ce60d5e6d631111e7a48ecac99dd2e9751aecc44f57afb47265c9e41b0244 not found: ID does not exist" Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.264146 4938 scope.go:117] "RemoveContainer" containerID="3221fe1fdedea846ed028a613a757510e84cdaef9758b681161e68fb006e3a31" Nov 22 11:02:19 crc kubenswrapper[4938]: E1122 11:02:19.264516 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3221fe1fdedea846ed028a613a757510e84cdaef9758b681161e68fb006e3a31\": container with ID starting with 3221fe1fdedea846ed028a613a757510e84cdaef9758b681161e68fb006e3a31 not found: ID does not exist" containerID="3221fe1fdedea846ed028a613a757510e84cdaef9758b681161e68fb006e3a31" Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.264554 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3221fe1fdedea846ed028a613a757510e84cdaef9758b681161e68fb006e3a31"} err="failed to get container status \"3221fe1fdedea846ed028a613a757510e84cdaef9758b681161e68fb006e3a31\": rpc error: code = NotFound desc = could not find container \"3221fe1fdedea846ed028a613a757510e84cdaef9758b681161e68fb006e3a31\": container with ID starting with 3221fe1fdedea846ed028a613a757510e84cdaef9758b681161e68fb006e3a31 not found: ID does not exist" Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.264601 4938 scope.go:117] "RemoveContainer" containerID="638d5d804350f4a8eb60631c55782fab5647c0c2fb8f7ab6ca4c64a19a2980f1" Nov 22 11:02:19 crc kubenswrapper[4938]: E1122 11:02:19.265083 4938 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"638d5d804350f4a8eb60631c55782fab5647c0c2fb8f7ab6ca4c64a19a2980f1\": container with ID starting with 638d5d804350f4a8eb60631c55782fab5647c0c2fb8f7ab6ca4c64a19a2980f1 not found: ID does not exist" containerID="638d5d804350f4a8eb60631c55782fab5647c0c2fb8f7ab6ca4c64a19a2980f1" Nov 22 11:02:19 crc kubenswrapper[4938]: I1122 11:02:19.265113 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"638d5d804350f4a8eb60631c55782fab5647c0c2fb8f7ab6ca4c64a19a2980f1"} err="failed to get container status \"638d5d804350f4a8eb60631c55782fab5647c0c2fb8f7ab6ca4c64a19a2980f1\": rpc error: code = NotFound desc = could not find container \"638d5d804350f4a8eb60631c55782fab5647c0c2fb8f7ab6ca4c64a19a2980f1\": container with ID starting with 638d5d804350f4a8eb60631c55782fab5647c0c2fb8f7ab6ca4c64a19a2980f1 not found: ID does not exist" Nov 22 11:02:20 crc kubenswrapper[4938]: I1122 11:02:20.457624 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc6ccd12-b009-46c3-ab37-70756536c540" path="/var/lib/kubelet/pods/dc6ccd12-b009-46c3-ab37-70756536c540/volumes" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.540702 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v"] Nov 22 11:02:22 crc kubenswrapper[4938]: E1122 11:02:22.541346 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc6ccd12-b009-46c3-ab37-70756536c540" containerName="registry-server" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.541359 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc6ccd12-b009-46c3-ab37-70756536c540" containerName="registry-server" Nov 22 11:02:22 crc kubenswrapper[4938]: E1122 11:02:22.541381 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc6ccd12-b009-46c3-ab37-70756536c540" containerName="extract-utilities" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.541387 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc6ccd12-b009-46c3-ab37-70756536c540" containerName="extract-utilities" Nov 22 11:02:22 crc kubenswrapper[4938]: E1122 11:02:22.541396 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58b89450-2eae-4604-b2e3-eb29ab66f574" containerName="dnsmasq-dns" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.541404 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="58b89450-2eae-4604-b2e3-eb29ab66f574" containerName="dnsmasq-dns" Nov 22 11:02:22 crc kubenswrapper[4938]: E1122 11:02:22.541423 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58b89450-2eae-4604-b2e3-eb29ab66f574" containerName="init" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.541430 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="58b89450-2eae-4604-b2e3-eb29ab66f574" containerName="init" Nov 22 11:02:22 crc kubenswrapper[4938]: E1122 11:02:22.541456 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc6ccd12-b009-46c3-ab37-70756536c540" containerName="extract-content" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.541462 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc6ccd12-b009-46c3-ab37-70756536c540" containerName="extract-content" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.541626 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc6ccd12-b009-46c3-ab37-70756536c540" containerName="registry-server" Nov 22 11:02:22 crc 
kubenswrapper[4938]: I1122 11:02:22.541642 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="58b89450-2eae-4604-b2e3-eb29ab66f574" containerName="dnsmasq-dns" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.542333 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.544146 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.544430 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.544783 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.545080 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.551743 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v"] Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.595514 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whb5d\" (UniqueName: \"kubernetes.io/projected/ac9be71f-c722-4d3e-b43a-7dffaa096daf-kube-api-access-whb5d\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.595561 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.595587 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.595749 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.698311 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " 
pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.698464 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whb5d\" (UniqueName: \"kubernetes.io/projected/ac9be71f-c722-4d3e-b43a-7dffaa096daf-kube-api-access-whb5d\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.698491 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.698517 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.706622 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.707293 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.708468 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.716624 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whb5d\" (UniqueName: \"kubernetes.io/projected/ac9be71f-c722-4d3e-b43a-7dffaa096daf-kube-api-access-whb5d\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:22 crc kubenswrapper[4938]: I1122 11:02:22.862434 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:23 crc kubenswrapper[4938]: I1122 11:02:23.398860 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v"] Nov 22 11:02:23 crc kubenswrapper[4938]: W1122 11:02:23.409014 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac9be71f_c722_4d3e_b43a_7dffaa096daf.slice/crio-9b9199c02f96b5ad9e2cba164414a6fbd9502b4379a96deefc35a35a1d144c71 WatchSource:0}: Error finding container 9b9199c02f96b5ad9e2cba164414a6fbd9502b4379a96deefc35a35a1d144c71: Status 404 returned error can't find the container with id 9b9199c02f96b5ad9e2cba164414a6fbd9502b4379a96deefc35a35a1d144c71 Nov 22 11:02:23 crc kubenswrapper[4938]: I1122 11:02:23.544677 4938 scope.go:117] "RemoveContainer" containerID="ba096eb02091d8c3a9903e2e267ae7540fdc7f45825b897f8f3fd5cde5794d36" Nov 22 11:02:24 crc kubenswrapper[4938]: I1122 11:02:24.227230 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" event={"ID":"ac9be71f-c722-4d3e-b43a-7dffaa096daf","Type":"ContainerStarted","Data":"9b9199c02f96b5ad9e2cba164414a6fbd9502b4379a96deefc35a35a1d144c71"} Nov 22 11:02:33 crc kubenswrapper[4938]: I1122 11:02:33.322684 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" event={"ID":"ac9be71f-c722-4d3e-b43a-7dffaa096daf","Type":"ContainerStarted","Data":"d87a6948a4a11f336a1018108acffc60253d8efc7b044941a785d159f355e557"} Nov 22 11:02:33 crc kubenswrapper[4938]: I1122 11:02:33.344311 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" podStartSLOduration=2.00514349 podStartE2EDuration="11.344293832s" podCreationTimestamp="2025-11-22 11:02:22 +0000 UTC" firstStartedPulling="2025-11-22 11:02:23.412076777 +0000 UTC m=+1475.879914176" lastFinishedPulling="2025-11-22 11:02:32.751227119 +0000 UTC m=+1485.219064518" observedRunningTime="2025-11-22 11:02:33.342515078 +0000 UTC m=+1485.810352747" watchObservedRunningTime="2025-11-22 11:02:33.344293832 +0000 UTC m=+1485.812131231" Nov 22 11:02:38 crc kubenswrapper[4938]: I1122 11:02:38.363660 4938 generic.go:334] "Generic (PLEG): container finished" podID="30956ae1-6658-45ca-867e-12fb808394db" containerID="904c1ba783a8d6b983a26c7622a1649a647aa4abdcf90fc93c04fca34ca2de2b" exitCode=0 Nov 22 11:02:38 crc kubenswrapper[4938]: I1122 11:02:38.363741 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"30956ae1-6658-45ca-867e-12fb808394db","Type":"ContainerDied","Data":"904c1ba783a8d6b983a26c7622a1649a647aa4abdcf90fc93c04fca34ca2de2b"} Nov 22 11:02:39 crc kubenswrapper[4938]: I1122 11:02:39.373310 4938 generic.go:334] "Generic (PLEG): container finished" podID="fc5cb0aa-c3a0-436c-b911-6029b94775a8" containerID="289a9871294df2e90f5eb7b67e834dc0935b78959c412b91228e904edd999bb2" exitCode=0 Nov 22 11:02:39 crc kubenswrapper[4938]: I1122 11:02:39.373629 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"fc5cb0aa-c3a0-436c-b911-6029b94775a8","Type":"ContainerDied","Data":"289a9871294df2e90f5eb7b67e834dc0935b78959c412b91228e904edd999bb2"} Nov 22 11:02:40 crc kubenswrapper[4938]: I1122 11:02:40.383744 4938 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"fc5cb0aa-c3a0-436c-b911-6029b94775a8","Type":"ContainerStarted","Data":"26e48120c7ae24b30c36ee241b9e2727721e5cf37a97e2e47ea388edbf34fc4f"} Nov 22 11:02:40 crc kubenswrapper[4938]: I1122 11:02:40.384244 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:40 crc kubenswrapper[4938]: I1122 11:02:40.386210 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"30956ae1-6658-45ca-867e-12fb808394db","Type":"ContainerStarted","Data":"24d88c9828f7bd0e92c167a79c65fb65d4ce244519fdd9d4a50e8632acf63e4e"} Nov 22 11:02:40 crc kubenswrapper[4938]: I1122 11:02:40.386428 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 22 11:02:40 crc kubenswrapper[4938]: I1122 11:02:40.408726 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.408702503 podStartE2EDuration="36.408702503s" podCreationTimestamp="2025-11-22 11:02:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:02:40.405353031 +0000 UTC m=+1492.873190430" watchObservedRunningTime="2025-11-22 11:02:40.408702503 +0000 UTC m=+1492.876539902" Nov 22 11:02:40 crc kubenswrapper[4938]: I1122 11:02:40.429511 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.429491177 podStartE2EDuration="37.429491177s" podCreationTimestamp="2025-11-22 11:02:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:02:40.423504549 +0000 UTC m=+1492.891341948" watchObservedRunningTime="2025-11-22 11:02:40.429491177 +0000 UTC m=+1492.897328576" Nov 22 11:02:41 crc kubenswrapper[4938]: I1122 11:02:41.300725 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:02:41 crc kubenswrapper[4938]: I1122 11:02:41.301124 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:02:49 crc kubenswrapper[4938]: I1122 11:02:49.484714 4938 generic.go:334] "Generic (PLEG): container finished" podID="ac9be71f-c722-4d3e-b43a-7dffaa096daf" containerID="d87a6948a4a11f336a1018108acffc60253d8efc7b044941a785d159f355e557" exitCode=0 Nov 22 11:02:49 crc kubenswrapper[4938]: I1122 11:02:49.484745 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" event={"ID":"ac9be71f-c722-4d3e-b43a-7dffaa096daf","Type":"ContainerDied","Data":"d87a6948a4a11f336a1018108acffc60253d8efc7b044941a785d159f355e557"} Nov 22 11:02:50 crc kubenswrapper[4938]: I1122 11:02:50.898337 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.025758 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-repo-setup-combined-ca-bundle\") pod \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.025881 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-inventory\") pod \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.026038 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-ssh-key\") pod \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.026114 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whb5d\" (UniqueName: \"kubernetes.io/projected/ac9be71f-c722-4d3e-b43a-7dffaa096daf-kube-api-access-whb5d\") pod \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\" (UID: \"ac9be71f-c722-4d3e-b43a-7dffaa096daf\") " Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.032714 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "ac9be71f-c722-4d3e-b43a-7dffaa096daf" (UID: "ac9be71f-c722-4d3e-b43a-7dffaa096daf"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.034371 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac9be71f-c722-4d3e-b43a-7dffaa096daf-kube-api-access-whb5d" (OuterVolumeSpecName: "kube-api-access-whb5d") pod "ac9be71f-c722-4d3e-b43a-7dffaa096daf" (UID: "ac9be71f-c722-4d3e-b43a-7dffaa096daf"). InnerVolumeSpecName "kube-api-access-whb5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.063375 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ac9be71f-c722-4d3e-b43a-7dffaa096daf" (UID: "ac9be71f-c722-4d3e-b43a-7dffaa096daf"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.066106 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-inventory" (OuterVolumeSpecName: "inventory") pod "ac9be71f-c722-4d3e-b43a-7dffaa096daf" (UID: "ac9be71f-c722-4d3e-b43a-7dffaa096daf"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.127807 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.127842 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whb5d\" (UniqueName: \"kubernetes.io/projected/ac9be71f-c722-4d3e-b43a-7dffaa096daf-kube-api-access-whb5d\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.127855 4938 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.127864 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac9be71f-c722-4d3e-b43a-7dffaa096daf-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.503399 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" event={"ID":"ac9be71f-c722-4d3e-b43a-7dffaa096daf","Type":"ContainerDied","Data":"9b9199c02f96b5ad9e2cba164414a6fbd9502b4379a96deefc35a35a1d144c71"} Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.504015 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b9199c02f96b5ad9e2cba164414a6fbd9502b4379a96deefc35a35a1d144c71" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.503472 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.594200 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk"] Nov 22 11:02:51 crc kubenswrapper[4938]: E1122 11:02:51.594960 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac9be71f-c722-4d3e-b43a-7dffaa096daf" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.595044 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac9be71f-c722-4d3e-b43a-7dffaa096daf" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.595348 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac9be71f-c722-4d3e-b43a-7dffaa096daf" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.596440 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.598688 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.599202 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.599962 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.603535 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk"] Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.604508 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.736844 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad415672-c278-4e60-b205-ff929432c200-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbmjk\" (UID: \"ad415672-c278-4e60-b205-ff929432c200\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.737285 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5tp5\" (UniqueName: \"kubernetes.io/projected/ad415672-c278-4e60-b205-ff929432c200-kube-api-access-h5tp5\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbmjk\" (UID: \"ad415672-c278-4e60-b205-ff929432c200\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.737332 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad415672-c278-4e60-b205-ff929432c200-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbmjk\" (UID: \"ad415672-c278-4e60-b205-ff929432c200\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.838929 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5tp5\" (UniqueName: \"kubernetes.io/projected/ad415672-c278-4e60-b205-ff929432c200-kube-api-access-h5tp5\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbmjk\" (UID: \"ad415672-c278-4e60-b205-ff929432c200\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.838989 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad415672-c278-4e60-b205-ff929432c200-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbmjk\" (UID: \"ad415672-c278-4e60-b205-ff929432c200\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.839087 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad415672-c278-4e60-b205-ff929432c200-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbmjk\" (UID: \"ad415672-c278-4e60-b205-ff929432c200\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.842705 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad415672-c278-4e60-b205-ff929432c200-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbmjk\" (UID: \"ad415672-c278-4e60-b205-ff929432c200\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.842939 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad415672-c278-4e60-b205-ff929432c200-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbmjk\" (UID: \"ad415672-c278-4e60-b205-ff929432c200\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.859736 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5tp5\" (UniqueName: \"kubernetes.io/projected/ad415672-c278-4e60-b205-ff929432c200-kube-api-access-h5tp5\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-mbmjk\" (UID: \"ad415672-c278-4e60-b205-ff929432c200\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:51 crc kubenswrapper[4938]: I1122 11:02:51.920442 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.119713 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-w5lft"] Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.121857 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.140536 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w5lft"] Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.252544 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zx47j\" (UniqueName: \"kubernetes.io/projected/f3f29f4e-6e15-4926-affb-3b6c890f7556-kube-api-access-zx47j\") pod \"redhat-marketplace-w5lft\" (UID: \"f3f29f4e-6e15-4926-affb-3b6c890f7556\") " pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.252594 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3f29f4e-6e15-4926-affb-3b6c890f7556-utilities\") pod \"redhat-marketplace-w5lft\" (UID: \"f3f29f4e-6e15-4926-affb-3b6c890f7556\") " pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.252713 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3f29f4e-6e15-4926-affb-3b6c890f7556-catalog-content\") pod \"redhat-marketplace-w5lft\" (UID: \"f3f29f4e-6e15-4926-affb-3b6c890f7556\") " pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.353888 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3f29f4e-6e15-4926-affb-3b6c890f7556-catalog-content\") pod \"redhat-marketplace-w5lft\" (UID: \"f3f29f4e-6e15-4926-affb-3b6c890f7556\") " pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.354057 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zx47j\" (UniqueName: \"kubernetes.io/projected/f3f29f4e-6e15-4926-affb-3b6c890f7556-kube-api-access-zx47j\") pod \"redhat-marketplace-w5lft\" (UID: \"f3f29f4e-6e15-4926-affb-3b6c890f7556\") " pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.354083 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3f29f4e-6e15-4926-affb-3b6c890f7556-utilities\") pod \"redhat-marketplace-w5lft\" (UID: \"f3f29f4e-6e15-4926-affb-3b6c890f7556\") " pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.354317 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3f29f4e-6e15-4926-affb-3b6c890f7556-catalog-content\") pod \"redhat-marketplace-w5lft\" (UID: \"f3f29f4e-6e15-4926-affb-3b6c890f7556\") " pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.354448 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3f29f4e-6e15-4926-affb-3b6c890f7556-utilities\") pod \"redhat-marketplace-w5lft\" (UID: \"f3f29f4e-6e15-4926-affb-3b6c890f7556\") " pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.386868 4938 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-zx47j\" (UniqueName: \"kubernetes.io/projected/f3f29f4e-6e15-4926-affb-3b6c890f7556-kube-api-access-zx47j\") pod \"redhat-marketplace-w5lft\" (UID: \"f3f29f4e-6e15-4926-affb-3b6c890f7556\") " pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.450969 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.505524 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk"] Nov 22 11:02:52 crc kubenswrapper[4938]: W1122 11:02:52.512174 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad415672_c278_4e60_b205_ff929432c200.slice/crio-552bdd85f24e756fe1e1b089333cb17edb01f16d3b20b3899040f3284e2538e6 WatchSource:0}: Error finding container 552bdd85f24e756fe1e1b089333cb17edb01f16d3b20b3899040f3284e2538e6: Status 404 returned error can't find the container with id 552bdd85f24e756fe1e1b089333cb17edb01f16d3b20b3899040f3284e2538e6 Nov 22 11:02:52 crc kubenswrapper[4938]: I1122 11:02:52.925572 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w5lft"] Nov 22 11:02:52 crc kubenswrapper[4938]: W1122 11:02:52.926078 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf3f29f4e_6e15_4926_affb_3b6c890f7556.slice/crio-ce0528f37b77b800e2ab89254162109f03690861d1a233829c59de702df0f90d WatchSource:0}: Error finding container ce0528f37b77b800e2ab89254162109f03690861d1a233829c59de702df0f90d: Status 404 returned error can't find the container with id ce0528f37b77b800e2ab89254162109f03690861d1a233829c59de702df0f90d Nov 22 11:02:53 crc kubenswrapper[4938]: I1122 11:02:53.356136 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 22 11:02:53 crc kubenswrapper[4938]: I1122 11:02:53.566532 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" event={"ID":"ad415672-c278-4e60-b205-ff929432c200","Type":"ContainerStarted","Data":"2d63f38337198fe9bb7e8be5d15f97e662c9dcf23c751e10efc9690c6ba6baa0"} Nov 22 11:02:53 crc kubenswrapper[4938]: I1122 11:02:53.568077 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" event={"ID":"ad415672-c278-4e60-b205-ff929432c200","Type":"ContainerStarted","Data":"552bdd85f24e756fe1e1b089333cb17edb01f16d3b20b3899040f3284e2538e6"} Nov 22 11:02:53 crc kubenswrapper[4938]: I1122 11:02:53.575821 4938 generic.go:334] "Generic (PLEG): container finished" podID="f3f29f4e-6e15-4926-affb-3b6c890f7556" containerID="fc1b242a60d53542fdc7db7c9933c763d95b92e3dccbe95a02a8dd19273fbe60" exitCode=0 Nov 22 11:02:53 crc kubenswrapper[4938]: I1122 11:02:53.575873 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w5lft" event={"ID":"f3f29f4e-6e15-4926-affb-3b6c890f7556","Type":"ContainerDied","Data":"fc1b242a60d53542fdc7db7c9933c763d95b92e3dccbe95a02a8dd19273fbe60"} Nov 22 11:02:53 crc kubenswrapper[4938]: I1122 11:02:53.575899 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w5lft" 
event={"ID":"f3f29f4e-6e15-4926-affb-3b6c890f7556","Type":"ContainerStarted","Data":"ce0528f37b77b800e2ab89254162109f03690861d1a233829c59de702df0f90d"} Nov 22 11:02:53 crc kubenswrapper[4938]: I1122 11:02:53.591959 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" podStartSLOduration=2.18032948 podStartE2EDuration="2.591942232s" podCreationTimestamp="2025-11-22 11:02:51 +0000 UTC" firstStartedPulling="2025-11-22 11:02:52.515828004 +0000 UTC m=+1504.983665413" lastFinishedPulling="2025-11-22 11:02:52.927440766 +0000 UTC m=+1505.395278165" observedRunningTime="2025-11-22 11:02:53.586674632 +0000 UTC m=+1506.054512021" watchObservedRunningTime="2025-11-22 11:02:53.591942232 +0000 UTC m=+1506.059779631" Nov 22 11:02:54 crc kubenswrapper[4938]: I1122 11:02:54.460131 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:02:54 crc kubenswrapper[4938]: I1122 11:02:54.607193 4938 generic.go:334] "Generic (PLEG): container finished" podID="f3f29f4e-6e15-4926-affb-3b6c890f7556" containerID="cc0112e842673804148e64b59a8eb63df16d91e5fe6adff1ba3a003b2cebd011" exitCode=0 Nov 22 11:02:54 crc kubenswrapper[4938]: I1122 11:02:54.607250 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w5lft" event={"ID":"f3f29f4e-6e15-4926-affb-3b6c890f7556","Type":"ContainerDied","Data":"cc0112e842673804148e64b59a8eb63df16d91e5fe6adff1ba3a003b2cebd011"} Nov 22 11:02:55 crc kubenswrapper[4938]: I1122 11:02:55.616305 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w5lft" event={"ID":"f3f29f4e-6e15-4926-affb-3b6c890f7556","Type":"ContainerStarted","Data":"ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a"} Nov 22 11:02:55 crc kubenswrapper[4938]: I1122 11:02:55.637051 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-w5lft" podStartSLOduration=1.927428555 podStartE2EDuration="3.637033122s" podCreationTimestamp="2025-11-22 11:02:52 +0000 UTC" firstStartedPulling="2025-11-22 11:02:53.580065079 +0000 UTC m=+1506.047902478" lastFinishedPulling="2025-11-22 11:02:55.289669646 +0000 UTC m=+1507.757507045" observedRunningTime="2025-11-22 11:02:55.630771488 +0000 UTC m=+1508.098608887" watchObservedRunningTime="2025-11-22 11:02:55.637033122 +0000 UTC m=+1508.104870521" Nov 22 11:02:56 crc kubenswrapper[4938]: I1122 11:02:56.625731 4938 generic.go:334] "Generic (PLEG): container finished" podID="ad415672-c278-4e60-b205-ff929432c200" containerID="2d63f38337198fe9bb7e8be5d15f97e662c9dcf23c751e10efc9690c6ba6baa0" exitCode=0 Nov 22 11:02:56 crc kubenswrapper[4938]: I1122 11:02:56.625806 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" event={"ID":"ad415672-c278-4e60-b205-ff929432c200","Type":"ContainerDied","Data":"2d63f38337198fe9bb7e8be5d15f97e662c9dcf23c751e10efc9690c6ba6baa0"} Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.032689 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.090152 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad415672-c278-4e60-b205-ff929432c200-inventory\") pod \"ad415672-c278-4e60-b205-ff929432c200\" (UID: \"ad415672-c278-4e60-b205-ff929432c200\") " Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.090325 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5tp5\" (UniqueName: \"kubernetes.io/projected/ad415672-c278-4e60-b205-ff929432c200-kube-api-access-h5tp5\") pod \"ad415672-c278-4e60-b205-ff929432c200\" (UID: \"ad415672-c278-4e60-b205-ff929432c200\") " Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.090422 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad415672-c278-4e60-b205-ff929432c200-ssh-key\") pod \"ad415672-c278-4e60-b205-ff929432c200\" (UID: \"ad415672-c278-4e60-b205-ff929432c200\") " Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.096044 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad415672-c278-4e60-b205-ff929432c200-kube-api-access-h5tp5" (OuterVolumeSpecName: "kube-api-access-h5tp5") pod "ad415672-c278-4e60-b205-ff929432c200" (UID: "ad415672-c278-4e60-b205-ff929432c200"). InnerVolumeSpecName "kube-api-access-h5tp5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.116817 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad415672-c278-4e60-b205-ff929432c200-inventory" (OuterVolumeSpecName: "inventory") pod "ad415672-c278-4e60-b205-ff929432c200" (UID: "ad415672-c278-4e60-b205-ff929432c200"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.118890 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad415672-c278-4e60-b205-ff929432c200-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ad415672-c278-4e60-b205-ff929432c200" (UID: "ad415672-c278-4e60-b205-ff929432c200"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.192834 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad415672-c278-4e60-b205-ff929432c200-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.192868 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5tp5\" (UniqueName: \"kubernetes.io/projected/ad415672-c278-4e60-b205-ff929432c200-kube-api-access-h5tp5\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.192878 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad415672-c278-4e60-b205-ff929432c200-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.644957 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" event={"ID":"ad415672-c278-4e60-b205-ff929432c200","Type":"ContainerDied","Data":"552bdd85f24e756fe1e1b089333cb17edb01f16d3b20b3899040f3284e2538e6"} Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.644995 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="552bdd85f24e756fe1e1b089333cb17edb01f16d3b20b3899040f3284e2538e6" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.645077 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-mbmjk" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.703983 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8"] Nov 22 11:02:58 crc kubenswrapper[4938]: E1122 11:02:58.704484 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad415672-c278-4e60-b205-ff929432c200" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.704509 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad415672-c278-4e60-b205-ff929432c200" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.704788 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad415672-c278-4e60-b205-ff929432c200" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.705526 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.707602 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.708316 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.708592 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.709105 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.715097 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8"] Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.802266 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.802578 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.802647 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.802733 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7hhr\" (UniqueName: \"kubernetes.io/projected/a53e09b5-739a-427e-b8f4-48fd612e9b07-kube-api-access-q7hhr\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.904409 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.904536 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-ssh-key\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.904594 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.904702 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7hhr\" (UniqueName: \"kubernetes.io/projected/a53e09b5-739a-427e-b8f4-48fd612e9b07-kube-api-access-q7hhr\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.909596 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.909682 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.913194 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:58 crc kubenswrapper[4938]: I1122 11:02:58.920288 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7hhr\" (UniqueName: \"kubernetes.io/projected/a53e09b5-739a-427e-b8f4-48fd612e9b07-kube-api-access-q7hhr\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:59 crc kubenswrapper[4938]: I1122 11:02:59.028537 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:02:59 crc kubenswrapper[4938]: W1122 11:02:59.526746 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda53e09b5_739a_427e_b8f4_48fd612e9b07.slice/crio-8f4b38bea4a1d3e5b209f70ebd63e405bc64aa56f2410ebc8b3ddfb75c204645 WatchSource:0}: Error finding container 8f4b38bea4a1d3e5b209f70ebd63e405bc64aa56f2410ebc8b3ddfb75c204645: Status 404 returned error can't find the container with id 8f4b38bea4a1d3e5b209f70ebd63e405bc64aa56f2410ebc8b3ddfb75c204645 Nov 22 11:02:59 crc kubenswrapper[4938]: I1122 11:02:59.535482 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8"] Nov 22 11:02:59 crc kubenswrapper[4938]: I1122 11:02:59.654476 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" event={"ID":"a53e09b5-739a-427e-b8f4-48fd612e9b07","Type":"ContainerStarted","Data":"8f4b38bea4a1d3e5b209f70ebd63e405bc64aa56f2410ebc8b3ddfb75c204645"} Nov 22 11:03:00 crc kubenswrapper[4938]: I1122 11:03:00.664318 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" event={"ID":"a53e09b5-739a-427e-b8f4-48fd612e9b07","Type":"ContainerStarted","Data":"f3f30464aae12f7ffe67e44ca98600983e72ecb3f4fddd5614c68c00beeb7b27"} Nov 22 11:03:00 crc kubenswrapper[4938]: I1122 11:03:00.686623 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" podStartSLOduration=2.2041851 podStartE2EDuration="2.68659756s" podCreationTimestamp="2025-11-22 11:02:58 +0000 UTC" firstStartedPulling="2025-11-22 11:02:59.528820556 +0000 UTC m=+1511.996657955" lastFinishedPulling="2025-11-22 11:03:00.011233016 +0000 UTC m=+1512.479070415" observedRunningTime="2025-11-22 11:03:00.677926186 +0000 UTC m=+1513.145763585" watchObservedRunningTime="2025-11-22 11:03:00.68659756 +0000 UTC m=+1513.154434959" Nov 22 11:03:02 crc kubenswrapper[4938]: I1122 11:03:02.460391 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:03:02 crc kubenswrapper[4938]: I1122 11:03:02.460732 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:03:02 crc kubenswrapper[4938]: I1122 11:03:02.500804 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:03:02 crc kubenswrapper[4938]: I1122 11:03:02.724805 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:03:02 crc kubenswrapper[4938]: I1122 11:03:02.780234 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-w5lft"] Nov 22 11:03:04 crc kubenswrapper[4938]: I1122 11:03:04.699812 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-w5lft" podUID="f3f29f4e-6e15-4926-affb-3b6c890f7556" containerName="registry-server" containerID="cri-o://ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a" gracePeriod=2 Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.130159 4938 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.224832 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3f29f4e-6e15-4926-affb-3b6c890f7556-catalog-content\") pod \"f3f29f4e-6e15-4926-affb-3b6c890f7556\" (UID: \"f3f29f4e-6e15-4926-affb-3b6c890f7556\") " Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.225314 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zx47j\" (UniqueName: \"kubernetes.io/projected/f3f29f4e-6e15-4926-affb-3b6c890f7556-kube-api-access-zx47j\") pod \"f3f29f4e-6e15-4926-affb-3b6c890f7556\" (UID: \"f3f29f4e-6e15-4926-affb-3b6c890f7556\") " Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.225393 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3f29f4e-6e15-4926-affb-3b6c890f7556-utilities\") pod \"f3f29f4e-6e15-4926-affb-3b6c890f7556\" (UID: \"f3f29f4e-6e15-4926-affb-3b6c890f7556\") " Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.226233 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3f29f4e-6e15-4926-affb-3b6c890f7556-utilities" (OuterVolumeSpecName: "utilities") pod "f3f29f4e-6e15-4926-affb-3b6c890f7556" (UID: "f3f29f4e-6e15-4926-affb-3b6c890f7556"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.230508 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3f29f4e-6e15-4926-affb-3b6c890f7556-kube-api-access-zx47j" (OuterVolumeSpecName: "kube-api-access-zx47j") pod "f3f29f4e-6e15-4926-affb-3b6c890f7556" (UID: "f3f29f4e-6e15-4926-affb-3b6c890f7556"). InnerVolumeSpecName "kube-api-access-zx47j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.246045 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3f29f4e-6e15-4926-affb-3b6c890f7556-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f3f29f4e-6e15-4926-affb-3b6c890f7556" (UID: "f3f29f4e-6e15-4926-affb-3b6c890f7556"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.327119 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zx47j\" (UniqueName: \"kubernetes.io/projected/f3f29f4e-6e15-4926-affb-3b6c890f7556-kube-api-access-zx47j\") on node \"crc\" DevicePath \"\"" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.327145 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3f29f4e-6e15-4926-affb-3b6c890f7556-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.327154 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3f29f4e-6e15-4926-affb-3b6c890f7556-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.711407 4938 generic.go:334] "Generic (PLEG): container finished" podID="f3f29f4e-6e15-4926-affb-3b6c890f7556" containerID="ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a" exitCode=0 Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.711450 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w5lft" event={"ID":"f3f29f4e-6e15-4926-affb-3b6c890f7556","Type":"ContainerDied","Data":"ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a"} Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.711474 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w5lft" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.711489 4938 scope.go:117] "RemoveContainer" containerID="ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.711478 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w5lft" event={"ID":"f3f29f4e-6e15-4926-affb-3b6c890f7556","Type":"ContainerDied","Data":"ce0528f37b77b800e2ab89254162109f03690861d1a233829c59de702df0f90d"} Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.744427 4938 scope.go:117] "RemoveContainer" containerID="cc0112e842673804148e64b59a8eb63df16d91e5fe6adff1ba3a003b2cebd011" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.752249 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-w5lft"] Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.764777 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-w5lft"] Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.770958 4938 scope.go:117] "RemoveContainer" containerID="fc1b242a60d53542fdc7db7c9933c763d95b92e3dccbe95a02a8dd19273fbe60" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.811211 4938 scope.go:117] "RemoveContainer" containerID="ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a" Nov 22 11:03:05 crc kubenswrapper[4938]: E1122 11:03:05.811698 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a\": container with ID starting with ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a not found: ID does not exist" containerID="ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.811750 4938 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a"} err="failed to get container status \"ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a\": rpc error: code = NotFound desc = could not find container \"ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a\": container with ID starting with ebd3114f339f5e2f3d2299687dd13aecafd2a133c972ed5e6d4937659a8b028a not found: ID does not exist" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.811781 4938 scope.go:117] "RemoveContainer" containerID="cc0112e842673804148e64b59a8eb63df16d91e5fe6adff1ba3a003b2cebd011" Nov 22 11:03:05 crc kubenswrapper[4938]: E1122 11:03:05.812252 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc0112e842673804148e64b59a8eb63df16d91e5fe6adff1ba3a003b2cebd011\": container with ID starting with cc0112e842673804148e64b59a8eb63df16d91e5fe6adff1ba3a003b2cebd011 not found: ID does not exist" containerID="cc0112e842673804148e64b59a8eb63df16d91e5fe6adff1ba3a003b2cebd011" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.812284 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc0112e842673804148e64b59a8eb63df16d91e5fe6adff1ba3a003b2cebd011"} err="failed to get container status \"cc0112e842673804148e64b59a8eb63df16d91e5fe6adff1ba3a003b2cebd011\": rpc error: code = NotFound desc = could not find container \"cc0112e842673804148e64b59a8eb63df16d91e5fe6adff1ba3a003b2cebd011\": container with ID starting with cc0112e842673804148e64b59a8eb63df16d91e5fe6adff1ba3a003b2cebd011 not found: ID does not exist" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.812311 4938 scope.go:117] "RemoveContainer" containerID="fc1b242a60d53542fdc7db7c9933c763d95b92e3dccbe95a02a8dd19273fbe60" Nov 22 11:03:05 crc kubenswrapper[4938]: E1122 11:03:05.812607 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc1b242a60d53542fdc7db7c9933c763d95b92e3dccbe95a02a8dd19273fbe60\": container with ID starting with fc1b242a60d53542fdc7db7c9933c763d95b92e3dccbe95a02a8dd19273fbe60 not found: ID does not exist" containerID="fc1b242a60d53542fdc7db7c9933c763d95b92e3dccbe95a02a8dd19273fbe60" Nov 22 11:03:05 crc kubenswrapper[4938]: I1122 11:03:05.812649 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc1b242a60d53542fdc7db7c9933c763d95b92e3dccbe95a02a8dd19273fbe60"} err="failed to get container status \"fc1b242a60d53542fdc7db7c9933c763d95b92e3dccbe95a02a8dd19273fbe60\": rpc error: code = NotFound desc = could not find container \"fc1b242a60d53542fdc7db7c9933c763d95b92e3dccbe95a02a8dd19273fbe60\": container with ID starting with fc1b242a60d53542fdc7db7c9933c763d95b92e3dccbe95a02a8dd19273fbe60 not found: ID does not exist" Nov 22 11:03:06 crc kubenswrapper[4938]: I1122 11:03:06.458138 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3f29f4e-6e15-4926-affb-3b6c890f7556" path="/var/lib/kubelet/pods/f3f29f4e-6e15-4926-affb-3b6c890f7556/volumes" Nov 22 11:03:11 crc kubenswrapper[4938]: I1122 11:03:11.301268 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:03:11 crc kubenswrapper[4938]: I1122 11:03:11.301827 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:03:23 crc kubenswrapper[4938]: I1122 11:03:23.620540 4938 scope.go:117] "RemoveContainer" containerID="17c6db99431b037bf08e4a0e783e917142cb0caf2d7ba06872fc681dbe0e72c4" Nov 22 11:03:23 crc kubenswrapper[4938]: I1122 11:03:23.658633 4938 scope.go:117] "RemoveContainer" containerID="cca482e3ff4f18deaae67014776c517027026d5147e33866a8e05dcc9c78246d" Nov 22 11:03:41 crc kubenswrapper[4938]: I1122 11:03:41.300706 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:03:41 crc kubenswrapper[4938]: I1122 11:03:41.301361 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:03:41 crc kubenswrapper[4938]: I1122 11:03:41.301414 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 11:03:41 crc kubenswrapper[4938]: I1122 11:03:41.302220 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:03:41 crc kubenswrapper[4938]: I1122 11:03:41.302288 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" gracePeriod=600 Nov 22 11:03:41 crc kubenswrapper[4938]: E1122 11:03:41.437193 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:03:42 crc kubenswrapper[4938]: I1122 11:03:42.048409 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" exitCode=0 Nov 22 11:03:42 crc kubenswrapper[4938]: I1122 11:03:42.048457 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" 
event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c"} Nov 22 11:03:42 crc kubenswrapper[4938]: I1122 11:03:42.048493 4938 scope.go:117] "RemoveContainer" containerID="f32603d3cd38d3d94b04f506650486f2678a5b58ca5be3b20ab1308b521f5361" Nov 22 11:03:42 crc kubenswrapper[4938]: I1122 11:03:42.049098 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:03:42 crc kubenswrapper[4938]: E1122 11:03:42.049399 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:03:53 crc kubenswrapper[4938]: I1122 11:03:53.446989 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:03:53 crc kubenswrapper[4938]: E1122 11:03:53.447760 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:04:05 crc kubenswrapper[4938]: I1122 11:04:05.447265 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:04:05 crc kubenswrapper[4938]: E1122 11:04:05.448209 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:04:16 crc kubenswrapper[4938]: I1122 11:04:16.447523 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:04:16 crc kubenswrapper[4938]: E1122 11:04:16.448149 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:04:23 crc kubenswrapper[4938]: I1122 11:04:23.760944 4938 scope.go:117] "RemoveContainer" containerID="68dca4e446261896aa331ae54d57f9ea910e2aa585ce6d395866cc115f6957f1" Nov 22 11:04:31 crc kubenswrapper[4938]: I1122 11:04:31.447821 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:04:31 crc kubenswrapper[4938]: E1122 11:04:31.448607 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:04:46 crc kubenswrapper[4938]: I1122 11:04:46.447077 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:04:46 crc kubenswrapper[4938]: E1122 11:04:46.447807 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:04:59 crc kubenswrapper[4938]: I1122 11:04:59.448224 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:04:59 crc kubenswrapper[4938]: E1122 11:04:59.449393 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:05:11 crc kubenswrapper[4938]: I1122 11:05:11.447981 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:05:11 crc kubenswrapper[4938]: E1122 11:05:11.448764 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:05:23 crc kubenswrapper[4938]: I1122 11:05:23.833894 4938 scope.go:117] "RemoveContainer" containerID="30333e0a74e81ce344627b28176a83fd53d72e1e91705c2bf758188a8b5666c8" Nov 22 11:05:23 crc kubenswrapper[4938]: I1122 11:05:23.858003 4938 scope.go:117] "RemoveContainer" containerID="7237993756da1207b595f4b3b36fefae50714ad40758045c8369d8564fc43995" Nov 22 11:05:23 crc kubenswrapper[4938]: I1122 11:05:23.901845 4938 scope.go:117] "RemoveContainer" containerID="9c3fd100bb65d322a041a369c5e56cf59728e6672ff37cc987ab646c33e9bebf" Nov 22 11:05:24 crc kubenswrapper[4938]: I1122 11:05:24.447964 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:05:24 crc kubenswrapper[4938]: E1122 11:05:24.448350 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" 
podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:05:36 crc kubenswrapper[4938]: I1122 11:05:36.448126 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:05:36 crc kubenswrapper[4938]: E1122 11:05:36.448906 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:05:48 crc kubenswrapper[4938]: I1122 11:05:48.454434 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:05:48 crc kubenswrapper[4938]: E1122 11:05:48.455515 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:05:59 crc kubenswrapper[4938]: I1122 11:05:59.447432 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:05:59 crc kubenswrapper[4938]: E1122 11:05:59.448290 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:06:11 crc kubenswrapper[4938]: I1122 11:06:11.447475 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:06:11 crc kubenswrapper[4938]: E1122 11:06:11.448285 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:06:17 crc kubenswrapper[4938]: I1122 11:06:17.442147 4938 generic.go:334] "Generic (PLEG): container finished" podID="a53e09b5-739a-427e-b8f4-48fd612e9b07" containerID="f3f30464aae12f7ffe67e44ca98600983e72ecb3f4fddd5614c68c00beeb7b27" exitCode=0 Nov 22 11:06:17 crc kubenswrapper[4938]: I1122 11:06:17.442227 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" event={"ID":"a53e09b5-739a-427e-b8f4-48fd612e9b07","Type":"ContainerDied","Data":"f3f30464aae12f7ffe67e44ca98600983e72ecb3f4fddd5614c68c00beeb7b27"} Nov 22 11:06:18 crc kubenswrapper[4938]: I1122 11:06:18.828868 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:06:18 crc kubenswrapper[4938]: I1122 11:06:18.949589 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-inventory\") pod \"a53e09b5-739a-427e-b8f4-48fd612e9b07\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " Nov 22 11:06:18 crc kubenswrapper[4938]: I1122 11:06:18.949654 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-bootstrap-combined-ca-bundle\") pod \"a53e09b5-739a-427e-b8f4-48fd612e9b07\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " Nov 22 11:06:18 crc kubenswrapper[4938]: I1122 11:06:18.949791 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-ssh-key\") pod \"a53e09b5-739a-427e-b8f4-48fd612e9b07\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " Nov 22 11:06:18 crc kubenswrapper[4938]: I1122 11:06:18.949938 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7hhr\" (UniqueName: \"kubernetes.io/projected/a53e09b5-739a-427e-b8f4-48fd612e9b07-kube-api-access-q7hhr\") pod \"a53e09b5-739a-427e-b8f4-48fd612e9b07\" (UID: \"a53e09b5-739a-427e-b8f4-48fd612e9b07\") " Nov 22 11:06:18 crc kubenswrapper[4938]: I1122 11:06:18.955063 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "a53e09b5-739a-427e-b8f4-48fd612e9b07" (UID: "a53e09b5-739a-427e-b8f4-48fd612e9b07"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:06:18 crc kubenswrapper[4938]: I1122 11:06:18.959072 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a53e09b5-739a-427e-b8f4-48fd612e9b07-kube-api-access-q7hhr" (OuterVolumeSpecName: "kube-api-access-q7hhr") pod "a53e09b5-739a-427e-b8f4-48fd612e9b07" (UID: "a53e09b5-739a-427e-b8f4-48fd612e9b07"). InnerVolumeSpecName "kube-api-access-q7hhr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:06:18 crc kubenswrapper[4938]: I1122 11:06:18.975782 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-inventory" (OuterVolumeSpecName: "inventory") pod "a53e09b5-739a-427e-b8f4-48fd612e9b07" (UID: "a53e09b5-739a-427e-b8f4-48fd612e9b07"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:06:18 crc kubenswrapper[4938]: I1122 11:06:18.978412 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a53e09b5-739a-427e-b8f4-48fd612e9b07" (UID: "a53e09b5-739a-427e-b8f4-48fd612e9b07"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.052522 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7hhr\" (UniqueName: \"kubernetes.io/projected/a53e09b5-739a-427e-b8f4-48fd612e9b07-kube-api-access-q7hhr\") on node \"crc\" DevicePath \"\"" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.052555 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.052566 4938 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.052576 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a53e09b5-739a-427e-b8f4-48fd612e9b07-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.476100 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" event={"ID":"a53e09b5-739a-427e-b8f4-48fd612e9b07","Type":"ContainerDied","Data":"8f4b38bea4a1d3e5b209f70ebd63e405bc64aa56f2410ebc8b3ddfb75c204645"} Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.476142 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f4b38bea4a1d3e5b209f70ebd63e405bc64aa56f2410ebc8b3ddfb75c204645" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.476216 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.534019 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt"] Nov 22 11:06:19 crc kubenswrapper[4938]: E1122 11:06:19.534477 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a53e09b5-739a-427e-b8f4-48fd612e9b07" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.534498 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="a53e09b5-739a-427e-b8f4-48fd612e9b07" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 22 11:06:19 crc kubenswrapper[4938]: E1122 11:06:19.534514 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3f29f4e-6e15-4926-affb-3b6c890f7556" containerName="extract-content" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.534523 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3f29f4e-6e15-4926-affb-3b6c890f7556" containerName="extract-content" Nov 22 11:06:19 crc kubenswrapper[4938]: E1122 11:06:19.534543 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3f29f4e-6e15-4926-affb-3b6c890f7556" containerName="extract-utilities" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.534553 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3f29f4e-6e15-4926-affb-3b6c890f7556" containerName="extract-utilities" Nov 22 11:06:19 crc kubenswrapper[4938]: E1122 11:06:19.534566 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3f29f4e-6e15-4926-affb-3b6c890f7556" containerName="registry-server" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.534576 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3f29f4e-6e15-4926-affb-3b6c890f7556" containerName="registry-server" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.534789 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="a53e09b5-739a-427e-b8f4-48fd612e9b07" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.534831 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3f29f4e-6e15-4926-affb-3b6c890f7556" containerName="registry-server" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.535576 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.538275 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.538455 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.538576 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.538809 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.545714 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt"] Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.662984 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhcf5\" (UniqueName: \"kubernetes.io/projected/073859e3-9fc9-45e3-a311-34411cea1556-kube-api-access-lhcf5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-98krt\" (UID: \"073859e3-9fc9-45e3-a311-34411cea1556\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.663046 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/073859e3-9fc9-45e3-a311-34411cea1556-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-98krt\" (UID: \"073859e3-9fc9-45e3-a311-34411cea1556\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.663078 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/073859e3-9fc9-45e3-a311-34411cea1556-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-98krt\" (UID: \"073859e3-9fc9-45e3-a311-34411cea1556\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.764602 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhcf5\" (UniqueName: \"kubernetes.io/projected/073859e3-9fc9-45e3-a311-34411cea1556-kube-api-access-lhcf5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-98krt\" (UID: \"073859e3-9fc9-45e3-a311-34411cea1556\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.764685 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/073859e3-9fc9-45e3-a311-34411cea1556-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-98krt\" (UID: \"073859e3-9fc9-45e3-a311-34411cea1556\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.764712 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/073859e3-9fc9-45e3-a311-34411cea1556-ssh-key\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-98krt\" (UID: \"073859e3-9fc9-45e3-a311-34411cea1556\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.770635 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/073859e3-9fc9-45e3-a311-34411cea1556-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-98krt\" (UID: \"073859e3-9fc9-45e3-a311-34411cea1556\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.772449 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/073859e3-9fc9-45e3-a311-34411cea1556-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-98krt\" (UID: \"073859e3-9fc9-45e3-a311-34411cea1556\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.786809 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhcf5\" (UniqueName: \"kubernetes.io/projected/073859e3-9fc9-45e3-a311-34411cea1556-kube-api-access-lhcf5\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-98krt\" (UID: \"073859e3-9fc9-45e3-a311-34411cea1556\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" Nov 22 11:06:19 crc kubenswrapper[4938]: I1122 11:06:19.862259 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" Nov 22 11:06:20 crc kubenswrapper[4938]: I1122 11:06:20.388805 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt"] Nov 22 11:06:20 crc kubenswrapper[4938]: I1122 11:06:20.399363 4938 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:06:20 crc kubenswrapper[4938]: I1122 11:06:20.485820 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" event={"ID":"073859e3-9fc9-45e3-a311-34411cea1556","Type":"ContainerStarted","Data":"d5377e71d760f8ccbabb08cb602c095b8c7e4094df572f3863527b560bdd2a57"} Nov 22 11:06:21 crc kubenswrapper[4938]: I1122 11:06:21.495414 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" event={"ID":"073859e3-9fc9-45e3-a311-34411cea1556","Type":"ContainerStarted","Data":"9644d9961e7ba55751a9422e01c0664148d69c62ef174ec555ab980ecb44b1f6"} Nov 22 11:06:21 crc kubenswrapper[4938]: I1122 11:06:21.516591 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" podStartSLOduration=2.095583059 podStartE2EDuration="2.516570524s" podCreationTimestamp="2025-11-22 11:06:19 +0000 UTC" firstStartedPulling="2025-11-22 11:06:20.398977582 +0000 UTC m=+1712.866814981" lastFinishedPulling="2025-11-22 11:06:20.819965047 +0000 UTC m=+1713.287802446" observedRunningTime="2025-11-22 11:06:21.510586015 +0000 UTC m=+1713.978423414" watchObservedRunningTime="2025-11-22 11:06:21.516570524 +0000 UTC m=+1713.984407913" Nov 22 11:06:23 crc kubenswrapper[4938]: I1122 11:06:23.036410 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-r44fd"] Nov 22 
11:06:23 crc kubenswrapper[4938]: I1122 11:06:23.045964 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-w48hh"] Nov 22 11:06:23 crc kubenswrapper[4938]: I1122 11:06:23.054422 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-r44fd"] Nov 22 11:06:23 crc kubenswrapper[4938]: I1122 11:06:23.062015 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-w48hh"] Nov 22 11:06:23 crc kubenswrapper[4938]: I1122 11:06:23.448028 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:06:23 crc kubenswrapper[4938]: E1122 11:06:23.448355 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:06:23 crc kubenswrapper[4938]: I1122 11:06:23.978665 4938 scope.go:117] "RemoveContainer" containerID="e9ca44502bada600f84c79512fe7e3e9a584a5309ce226beaa15211e8a6b4373" Nov 22 11:06:23 crc kubenswrapper[4938]: I1122 11:06:23.997897 4938 scope.go:117] "RemoveContainer" containerID="a28f9a7d43054481cd014004ec6725abbfc8b745e6826dd9f786c79bdfd63bca" Nov 22 11:06:24 crc kubenswrapper[4938]: I1122 11:06:24.023428 4938 scope.go:117] "RemoveContainer" containerID="8aee91714eb7b8683df510e891af0cc2ce4e183b33f8d080061f5cc34a8ec99c" Nov 22 11:06:24 crc kubenswrapper[4938]: I1122 11:06:24.036003 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-zw4nk"] Nov 22 11:06:24 crc kubenswrapper[4938]: I1122 11:06:24.047069 4938 scope.go:117] "RemoveContainer" containerID="036e5c6978df991b9af76e857b5a0b281796b76f4d8ad2e761a4e856411fa59b" Nov 22 11:06:24 crc kubenswrapper[4938]: I1122 11:06:24.055228 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-zw4nk"] Nov 22 11:06:24 crc kubenswrapper[4938]: I1122 11:06:24.068060 4938 scope.go:117] "RemoveContainer" containerID="721a7ca8713e0eac25790794b11fb0ba077d45efa26ddec6458d6794eb239df9" Nov 22 11:06:24 crc kubenswrapper[4938]: I1122 11:06:24.458629 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67463515-1e01-40aa-b47b-e15d1dc63ef8" path="/var/lib/kubelet/pods/67463515-1e01-40aa-b47b-e15d1dc63ef8/volumes" Nov 22 11:06:24 crc kubenswrapper[4938]: I1122 11:06:24.459684 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82b8dcb0-b9c8-4773-a64f-e6d6e89df241" path="/var/lib/kubelet/pods/82b8dcb0-b9c8-4773-a64f-e6d6e89df241/volumes" Nov 22 11:06:24 crc kubenswrapper[4938]: I1122 11:06:24.460235 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e880927-eabf-4d36-ae6d-40aa89780b1b" path="/var/lib/kubelet/pods/8e880927-eabf-4d36-ae6d-40aa89780b1b/volumes" Nov 22 11:06:35 crc kubenswrapper[4938]: I1122 11:06:35.447214 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:06:35 crc kubenswrapper[4938]: E1122 11:06:35.447966 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:06:46 crc kubenswrapper[4938]: I1122 11:06:46.035992 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5140-account-create-cbl8j"] Nov 22 11:06:46 crc kubenswrapper[4938]: I1122 11:06:46.046270 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-18f1-account-create-wp2lb"] Nov 22 11:06:46 crc kubenswrapper[4938]: I1122 11:06:46.058771 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-80b6-account-create-qj8v6"] Nov 22 11:06:46 crc kubenswrapper[4938]: I1122 11:06:46.071209 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-5140-account-create-cbl8j"] Nov 22 11:06:46 crc kubenswrapper[4938]: I1122 11:06:46.081754 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-18f1-account-create-wp2lb"] Nov 22 11:06:46 crc kubenswrapper[4938]: I1122 11:06:46.089659 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-80b6-account-create-qj8v6"] Nov 22 11:06:46 crc kubenswrapper[4938]: I1122 11:06:46.465997 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="075db7dd-b0f9-4b53-8a2e-02af60130f52" path="/var/lib/kubelet/pods/075db7dd-b0f9-4b53-8a2e-02af60130f52/volumes" Nov 22 11:06:46 crc kubenswrapper[4938]: I1122 11:06:46.467410 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="327fd7ac-029b-487f-a63c-28667c1ad2e2" path="/var/lib/kubelet/pods/327fd7ac-029b-487f-a63c-28667c1ad2e2/volumes" Nov 22 11:06:46 crc kubenswrapper[4938]: I1122 11:06:46.467927 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71c28968-eb30-4073-843c-65ddfd4a5073" path="/var/lib/kubelet/pods/71c28968-eb30-4073-843c-65ddfd4a5073/volumes" Nov 22 11:06:47 crc kubenswrapper[4938]: I1122 11:06:47.447745 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:06:47 crc kubenswrapper[4938]: E1122 11:06:47.448308 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:06:59 crc kubenswrapper[4938]: I1122 11:06:59.447739 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:06:59 crc kubenswrapper[4938]: E1122 11:06:59.448552 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:07:08 crc kubenswrapper[4938]: I1122 11:07:08.041154 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-lr6cz"] Nov 22 11:07:08 crc 
kubenswrapper[4938]: I1122 11:07:08.059336 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-5bmmm"] Nov 22 11:07:08 crc kubenswrapper[4938]: I1122 11:07:08.069894 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-d64lt"] Nov 22 11:07:08 crc kubenswrapper[4938]: I1122 11:07:08.079537 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-lr6cz"] Nov 22 11:07:08 crc kubenswrapper[4938]: I1122 11:07:08.087673 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-5bmmm"] Nov 22 11:07:08 crc kubenswrapper[4938]: I1122 11:07:08.096701 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-d64lt"] Nov 22 11:07:08 crc kubenswrapper[4938]: I1122 11:07:08.463283 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f269f03-228e-42e2-b49f-106fcb2bc258" path="/var/lib/kubelet/pods/0f269f03-228e-42e2-b49f-106fcb2bc258/volumes" Nov 22 11:07:08 crc kubenswrapper[4938]: I1122 11:07:08.464421 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="577e27a5-3da6-4a00-9897-be6ef6a50c58" path="/var/lib/kubelet/pods/577e27a5-3da6-4a00-9897-be6ef6a50c58/volumes" Nov 22 11:07:08 crc kubenswrapper[4938]: I1122 11:07:08.464997 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f645453d-fd00-45da-bc78-1f8bde75b6e3" path="/var/lib/kubelet/pods/f645453d-fd00-45da-bc78-1f8bde75b6e3/volumes" Nov 22 11:07:11 crc kubenswrapper[4938]: I1122 11:07:11.458057 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:07:11 crc kubenswrapper[4938]: E1122 11:07:11.458665 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:07:21 crc kubenswrapper[4938]: I1122 11:07:21.042471 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-c5fa-account-create-qwnhv"] Nov 22 11:07:21 crc kubenswrapper[4938]: I1122 11:07:21.053231 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7ebc-account-create-88cmf"] Nov 22 11:07:21 crc kubenswrapper[4938]: I1122 11:07:21.064540 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-c5fa-account-create-qwnhv"] Nov 22 11:07:21 crc kubenswrapper[4938]: I1122 11:07:21.071525 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7ebc-account-create-88cmf"] Nov 22 11:07:22 crc kubenswrapper[4938]: I1122 11:07:22.030886 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-adde-account-create-8htj4"] Nov 22 11:07:22 crc kubenswrapper[4938]: I1122 11:07:22.039229 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-adde-account-create-8htj4"] Nov 22 11:07:22 crc kubenswrapper[4938]: I1122 11:07:22.457796 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ee48b43-df7f-4bb1-b037-8509d4ccee45" path="/var/lib/kubelet/pods/4ee48b43-df7f-4bb1-b037-8509d4ccee45/volumes" Nov 22 11:07:22 crc kubenswrapper[4938]: I1122 11:07:22.458746 4938 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="81258080-d7e9-4f68-b261-fb3e9c467fed" path="/var/lib/kubelet/pods/81258080-d7e9-4f68-b261-fb3e9c467fed/volumes" Nov 22 11:07:22 crc kubenswrapper[4938]: I1122 11:07:22.459353 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b" path="/var/lib/kubelet/pods/ac19c1e7-231e-4dd0-84ce-0e7cc54feb9b/volumes" Nov 22 11:07:23 crc kubenswrapper[4938]: I1122 11:07:23.447569 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:07:23 crc kubenswrapper[4938]: E1122 11:07:23.447831 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:07:24 crc kubenswrapper[4938]: I1122 11:07:24.129957 4938 scope.go:117] "RemoveContainer" containerID="9a08f6e69f11d128d3b7000b90674a6fd9c82efedbd37e69eaf32a169c94b907" Nov 22 11:07:24 crc kubenswrapper[4938]: I1122 11:07:24.165597 4938 scope.go:117] "RemoveContainer" containerID="4d8635fd34904ad365126e4cbe9b13e7ec937b64204c1c5ccaffd8eea5521edd" Nov 22 11:07:24 crc kubenswrapper[4938]: I1122 11:07:24.192058 4938 scope.go:117] "RemoveContainer" containerID="1ce96aa0a206ea67b41747b27cf69edb9cd5e5b3b31fba184220fda1c4889227" Nov 22 11:07:24 crc kubenswrapper[4938]: I1122 11:07:24.230873 4938 scope.go:117] "RemoveContainer" containerID="ce9bff0db94af30448ba1164fa0fcaf09033b66dcfe20f667a6df374ecfce7bf" Nov 22 11:07:24 crc kubenswrapper[4938]: I1122 11:07:24.287988 4938 scope.go:117] "RemoveContainer" containerID="d216f5ec180a21957a61d54283f1c08881bbb217b37823302fee45b4f190e3cf" Nov 22 11:07:24 crc kubenswrapper[4938]: I1122 11:07:24.311161 4938 scope.go:117] "RemoveContainer" containerID="5c86082550534649d7823b2c65a8cc98e8cd46fbca02e5267f6b43cc9635dd1d" Nov 22 11:07:24 crc kubenswrapper[4938]: I1122 11:07:24.353313 4938 scope.go:117] "RemoveContainer" containerID="f411d9ecb9ff3d4af45cd9a9ff9ab36212f4ea144391afa122c76d81c0158f38" Nov 22 11:07:24 crc kubenswrapper[4938]: I1122 11:07:24.384274 4938 scope.go:117] "RemoveContainer" containerID="087ec4c04f9e9f8ec2461fe58f8cc4971238e43c21bacccd8673b4cdbd80ae53" Nov 22 11:07:24 crc kubenswrapper[4938]: I1122 11:07:24.408464 4938 scope.go:117] "RemoveContainer" containerID="89d3eaf62d690ce5ec14a79c6c8bc1632b827cd50359c60dc463ece747bc0e72" Nov 22 11:07:24 crc kubenswrapper[4938]: I1122 11:07:24.426117 4938 scope.go:117] "RemoveContainer" containerID="cd7b60d2938e2770cfe2feb91a0aba1b7d1697b025b4311e086ba77ea683bb6d" Nov 22 11:07:24 crc kubenswrapper[4938]: I1122 11:07:24.450605 4938 scope.go:117] "RemoveContainer" containerID="a9afe0852c244f1a9e61616de98b8ce5f0bbd91626d13eccd5d9e782b1e8a69e" Nov 22 11:07:24 crc kubenswrapper[4938]: I1122 11:07:24.470388 4938 scope.go:117] "RemoveContainer" containerID="f83161d1b77395d21f5c7f6dcadbefa730542f6f0cfae26215b24faba3d96355" Nov 22 11:07:26 crc kubenswrapper[4938]: I1122 11:07:26.034678 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-x62vc"] Nov 22 11:07:26 crc kubenswrapper[4938]: I1122 11:07:26.045535 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-x62vc"] Nov 22 
11:07:26 crc kubenswrapper[4938]: I1122 11:07:26.466761 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87059052-68f8-4e9c-9701-51a7f618e383" path="/var/lib/kubelet/pods/87059052-68f8-4e9c-9701-51a7f618e383/volumes" Nov 22 11:07:27 crc kubenswrapper[4938]: I1122 11:07:27.033867 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-wn476"] Nov 22 11:07:27 crc kubenswrapper[4938]: I1122 11:07:27.042203 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-wn476"] Nov 22 11:07:28 crc kubenswrapper[4938]: I1122 11:07:28.459311 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="181febdf-4ec0-45f1-a062-f2f097504deb" path="/var/lib/kubelet/pods/181febdf-4ec0-45f1-a062-f2f097504deb/volumes" Nov 22 11:07:32 crc kubenswrapper[4938]: I1122 11:07:32.909870 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2ct9v"] Nov 22 11:07:32 crc kubenswrapper[4938]: I1122 11:07:32.913514 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:32 crc kubenswrapper[4938]: I1122 11:07:32.930014 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2ct9v"] Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.058716 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77a9aa3d-75da-4642-9bce-01f57b9fe1da-catalog-content\") pod \"certified-operators-2ct9v\" (UID: \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\") " pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.058774 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6j88t\" (UniqueName: \"kubernetes.io/projected/77a9aa3d-75da-4642-9bce-01f57b9fe1da-kube-api-access-6j88t\") pod \"certified-operators-2ct9v\" (UID: \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\") " pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.058971 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77a9aa3d-75da-4642-9bce-01f57b9fe1da-utilities\") pod \"certified-operators-2ct9v\" (UID: \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\") " pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.103558 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rt7q9"] Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.106054 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.118422 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rt7q9"] Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.160607 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77a9aa3d-75da-4642-9bce-01f57b9fe1da-utilities\") pod \"certified-operators-2ct9v\" (UID: \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\") " pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.160802 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77a9aa3d-75da-4642-9bce-01f57b9fe1da-catalog-content\") pod \"certified-operators-2ct9v\" (UID: \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\") " pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.160835 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6j88t\" (UniqueName: \"kubernetes.io/projected/77a9aa3d-75da-4642-9bce-01f57b9fe1da-kube-api-access-6j88t\") pod \"certified-operators-2ct9v\" (UID: \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\") " pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.161259 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77a9aa3d-75da-4642-9bce-01f57b9fe1da-catalog-content\") pod \"certified-operators-2ct9v\" (UID: \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\") " pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.161465 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77a9aa3d-75da-4642-9bce-01f57b9fe1da-utilities\") pod \"certified-operators-2ct9v\" (UID: \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\") " pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.187685 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6j88t\" (UniqueName: \"kubernetes.io/projected/77a9aa3d-75da-4642-9bce-01f57b9fe1da-kube-api-access-6j88t\") pod \"certified-operators-2ct9v\" (UID: \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\") " pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.242445 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.262191 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c131c7bb-2e6f-4d78-a008-fd5ee2760542-catalog-content\") pod \"community-operators-rt7q9\" (UID: \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\") " pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.262425 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qq2r8\" (UniqueName: \"kubernetes.io/projected/c131c7bb-2e6f-4d78-a008-fd5ee2760542-kube-api-access-qq2r8\") pod \"community-operators-rt7q9\" (UID: \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\") " pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.262504 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c131c7bb-2e6f-4d78-a008-fd5ee2760542-utilities\") pod \"community-operators-rt7q9\" (UID: \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\") " pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.364254 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qq2r8\" (UniqueName: \"kubernetes.io/projected/c131c7bb-2e6f-4d78-a008-fd5ee2760542-kube-api-access-qq2r8\") pod \"community-operators-rt7q9\" (UID: \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\") " pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.364307 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c131c7bb-2e6f-4d78-a008-fd5ee2760542-utilities\") pod \"community-operators-rt7q9\" (UID: \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\") " pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.364427 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c131c7bb-2e6f-4d78-a008-fd5ee2760542-catalog-content\") pod \"community-operators-rt7q9\" (UID: \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\") " pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.364921 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c131c7bb-2e6f-4d78-a008-fd5ee2760542-catalog-content\") pod \"community-operators-rt7q9\" (UID: \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\") " pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.365429 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c131c7bb-2e6f-4d78-a008-fd5ee2760542-utilities\") pod \"community-operators-rt7q9\" (UID: \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\") " pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.396025 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qq2r8\" (UniqueName: \"kubernetes.io/projected/c131c7bb-2e6f-4d78-a008-fd5ee2760542-kube-api-access-qq2r8\") pod 
\"community-operators-rt7q9\" (UID: \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\") " pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.420858 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.778744 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2ct9v"] Nov 22 11:07:33 crc kubenswrapper[4938]: I1122 11:07:33.974861 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rt7q9"] Nov 22 11:07:34 crc kubenswrapper[4938]: I1122 11:07:34.172017 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt7q9" event={"ID":"c131c7bb-2e6f-4d78-a008-fd5ee2760542","Type":"ContainerStarted","Data":"2f01275cdd2e12445d0fc9c352773895cac66be2e1baa6d48cd1a01b5c4de418"} Nov 22 11:07:34 crc kubenswrapper[4938]: I1122 11:07:34.172069 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt7q9" event={"ID":"c131c7bb-2e6f-4d78-a008-fd5ee2760542","Type":"ContainerStarted","Data":"0fae1b2e78902f12f892749e88ebc254cbc42e2599559d882f91c00739fd75b5"} Nov 22 11:07:34 crc kubenswrapper[4938]: I1122 11:07:34.175383 4938 generic.go:334] "Generic (PLEG): container finished" podID="77a9aa3d-75da-4642-9bce-01f57b9fe1da" containerID="75828763c1dea86081b6c937bd4693cd2bd203cb49c3dcfa2d9e3956d43864f6" exitCode=0 Nov 22 11:07:34 crc kubenswrapper[4938]: I1122 11:07:34.175449 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2ct9v" event={"ID":"77a9aa3d-75da-4642-9bce-01f57b9fe1da","Type":"ContainerDied","Data":"75828763c1dea86081b6c937bd4693cd2bd203cb49c3dcfa2d9e3956d43864f6"} Nov 22 11:07:34 crc kubenswrapper[4938]: I1122 11:07:34.175468 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2ct9v" event={"ID":"77a9aa3d-75da-4642-9bce-01f57b9fe1da","Type":"ContainerStarted","Data":"e15f58560ebd75a0d7633f488b76c6da2a482c6969114bc0735428e883c23b84"} Nov 22 11:07:35 crc kubenswrapper[4938]: I1122 11:07:35.185816 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2ct9v" event={"ID":"77a9aa3d-75da-4642-9bce-01f57b9fe1da","Type":"ContainerStarted","Data":"651d336a89a16d4a74e19dfeb36eae088466835865543525184f6d68ebf9af19"} Nov 22 11:07:35 crc kubenswrapper[4938]: I1122 11:07:35.187378 4938 generic.go:334] "Generic (PLEG): container finished" podID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" containerID="2f01275cdd2e12445d0fc9c352773895cac66be2e1baa6d48cd1a01b5c4de418" exitCode=0 Nov 22 11:07:35 crc kubenswrapper[4938]: I1122 11:07:35.187419 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt7q9" event={"ID":"c131c7bb-2e6f-4d78-a008-fd5ee2760542","Type":"ContainerDied","Data":"2f01275cdd2e12445d0fc9c352773895cac66be2e1baa6d48cd1a01b5c4de418"} Nov 22 11:07:36 crc kubenswrapper[4938]: I1122 11:07:36.197264 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt7q9" event={"ID":"c131c7bb-2e6f-4d78-a008-fd5ee2760542","Type":"ContainerStarted","Data":"29f1e025cc135e47641d6c52614f6ed824d84d8f8141ad095f55b255c51a5d3b"} Nov 22 11:07:36 crc kubenswrapper[4938]: I1122 11:07:36.200579 4938 generic.go:334] "Generic (PLEG): 
container finished" podID="77a9aa3d-75da-4642-9bce-01f57b9fe1da" containerID="651d336a89a16d4a74e19dfeb36eae088466835865543525184f6d68ebf9af19" exitCode=0 Nov 22 11:07:36 crc kubenswrapper[4938]: I1122 11:07:36.200629 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2ct9v" event={"ID":"77a9aa3d-75da-4642-9bce-01f57b9fe1da","Type":"ContainerDied","Data":"651d336a89a16d4a74e19dfeb36eae088466835865543525184f6d68ebf9af19"} Nov 22 11:07:37 crc kubenswrapper[4938]: I1122 11:07:37.211435 4938 generic.go:334] "Generic (PLEG): container finished" podID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" containerID="29f1e025cc135e47641d6c52614f6ed824d84d8f8141ad095f55b255c51a5d3b" exitCode=0 Nov 22 11:07:37 crc kubenswrapper[4938]: I1122 11:07:37.211610 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt7q9" event={"ID":"c131c7bb-2e6f-4d78-a008-fd5ee2760542","Type":"ContainerDied","Data":"29f1e025cc135e47641d6c52614f6ed824d84d8f8141ad095f55b255c51a5d3b"} Nov 22 11:07:37 crc kubenswrapper[4938]: I1122 11:07:37.447546 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:07:37 crc kubenswrapper[4938]: E1122 11:07:37.447934 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:07:38 crc kubenswrapper[4938]: I1122 11:07:38.221359 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2ct9v" event={"ID":"77a9aa3d-75da-4642-9bce-01f57b9fe1da","Type":"ContainerStarted","Data":"ffcc8e03ffdc9af69d8a69d40f0bbdb952117d58b37e7b0ce215993f27523b1f"} Nov 22 11:07:38 crc kubenswrapper[4938]: I1122 11:07:38.244886 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2ct9v" podStartSLOduration=3.126009328 podStartE2EDuration="6.244869728s" podCreationTimestamp="2025-11-22 11:07:32 +0000 UTC" firstStartedPulling="2025-11-22 11:07:34.177068588 +0000 UTC m=+1786.644905987" lastFinishedPulling="2025-11-22 11:07:37.295928988 +0000 UTC m=+1789.763766387" observedRunningTime="2025-11-22 11:07:38.237000502 +0000 UTC m=+1790.704837901" watchObservedRunningTime="2025-11-22 11:07:38.244869728 +0000 UTC m=+1790.712707127" Nov 22 11:07:39 crc kubenswrapper[4938]: I1122 11:07:39.231826 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt7q9" event={"ID":"c131c7bb-2e6f-4d78-a008-fd5ee2760542","Type":"ContainerStarted","Data":"88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f"} Nov 22 11:07:39 crc kubenswrapper[4938]: I1122 11:07:39.254026 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rt7q9" podStartSLOduration=3.173891386 podStartE2EDuration="6.254003784s" podCreationTimestamp="2025-11-22 11:07:33 +0000 UTC" firstStartedPulling="2025-11-22 11:07:35.189199358 +0000 UTC m=+1787.657036777" lastFinishedPulling="2025-11-22 11:07:38.269311776 +0000 UTC m=+1790.737149175" observedRunningTime="2025-11-22 11:07:39.249834761 +0000 UTC 
m=+1791.717672160" watchObservedRunningTime="2025-11-22 11:07:39.254003784 +0000 UTC m=+1791.721841183" Nov 22 11:07:43 crc kubenswrapper[4938]: I1122 11:07:43.243153 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:43 crc kubenswrapper[4938]: I1122 11:07:43.243587 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:43 crc kubenswrapper[4938]: I1122 11:07:43.285502 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:43 crc kubenswrapper[4938]: I1122 11:07:43.330870 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:43 crc kubenswrapper[4938]: I1122 11:07:43.421320 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:43 crc kubenswrapper[4938]: I1122 11:07:43.421374 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:43 crc kubenswrapper[4938]: I1122 11:07:43.467452 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:43 crc kubenswrapper[4938]: I1122 11:07:43.693707 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2ct9v"] Nov 22 11:07:44 crc kubenswrapper[4938]: I1122 11:07:44.342927 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:45 crc kubenswrapper[4938]: I1122 11:07:45.281043 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2ct9v" podUID="77a9aa3d-75da-4642-9bce-01f57b9fe1da" containerName="registry-server" containerID="cri-o://ffcc8e03ffdc9af69d8a69d40f0bbdb952117d58b37e7b0ce215993f27523b1f" gracePeriod=2 Nov 22 11:07:45 crc kubenswrapper[4938]: I1122 11:07:45.892333 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rt7q9"] Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.290285 4938 generic.go:334] "Generic (PLEG): container finished" podID="77a9aa3d-75da-4642-9bce-01f57b9fe1da" containerID="ffcc8e03ffdc9af69d8a69d40f0bbdb952117d58b37e7b0ce215993f27523b1f" exitCode=0 Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.290691 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rt7q9" podUID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" containerName="registry-server" containerID="cri-o://88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f" gracePeriod=2 Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.291018 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2ct9v" event={"ID":"77a9aa3d-75da-4642-9bce-01f57b9fe1da","Type":"ContainerDied","Data":"ffcc8e03ffdc9af69d8a69d40f0bbdb952117d58b37e7b0ce215993f27523b1f"} Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.720276 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.857374 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.880974 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qq2r8\" (UniqueName: \"kubernetes.io/projected/c131c7bb-2e6f-4d78-a008-fd5ee2760542-kube-api-access-qq2r8\") pod \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\" (UID: \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\") " Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.881309 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c131c7bb-2e6f-4d78-a008-fd5ee2760542-catalog-content\") pod \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\" (UID: \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\") " Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.881445 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c131c7bb-2e6f-4d78-a008-fd5ee2760542-utilities\") pod \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\" (UID: \"c131c7bb-2e6f-4d78-a008-fd5ee2760542\") " Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.882516 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c131c7bb-2e6f-4d78-a008-fd5ee2760542-utilities" (OuterVolumeSpecName: "utilities") pod "c131c7bb-2e6f-4d78-a008-fd5ee2760542" (UID: "c131c7bb-2e6f-4d78-a008-fd5ee2760542"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.888207 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c131c7bb-2e6f-4d78-a008-fd5ee2760542-kube-api-access-qq2r8" (OuterVolumeSpecName: "kube-api-access-qq2r8") pod "c131c7bb-2e6f-4d78-a008-fd5ee2760542" (UID: "c131c7bb-2e6f-4d78-a008-fd5ee2760542"). InnerVolumeSpecName "kube-api-access-qq2r8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.930764 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c131c7bb-2e6f-4d78-a008-fd5ee2760542-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c131c7bb-2e6f-4d78-a008-fd5ee2760542" (UID: "c131c7bb-2e6f-4d78-a008-fd5ee2760542"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.982593 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6j88t\" (UniqueName: \"kubernetes.io/projected/77a9aa3d-75da-4642-9bce-01f57b9fe1da-kube-api-access-6j88t\") pod \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\" (UID: \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\") " Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.983002 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77a9aa3d-75da-4642-9bce-01f57b9fe1da-catalog-content\") pod \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\" (UID: \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\") " Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.983312 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77a9aa3d-75da-4642-9bce-01f57b9fe1da-utilities\") pod \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\" (UID: \"77a9aa3d-75da-4642-9bce-01f57b9fe1da\") " Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.983973 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qq2r8\" (UniqueName: \"kubernetes.io/projected/c131c7bb-2e6f-4d78-a008-fd5ee2760542-kube-api-access-qq2r8\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.984071 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c131c7bb-2e6f-4d78-a008-fd5ee2760542-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.984145 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c131c7bb-2e6f-4d78-a008-fd5ee2760542-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.984158 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77a9aa3d-75da-4642-9bce-01f57b9fe1da-utilities" (OuterVolumeSpecName: "utilities") pod "77a9aa3d-75da-4642-9bce-01f57b9fe1da" (UID: "77a9aa3d-75da-4642-9bce-01f57b9fe1da"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:07:46 crc kubenswrapper[4938]: I1122 11:07:46.985690 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77a9aa3d-75da-4642-9bce-01f57b9fe1da-kube-api-access-6j88t" (OuterVolumeSpecName: "kube-api-access-6j88t") pod "77a9aa3d-75da-4642-9bce-01f57b9fe1da" (UID: "77a9aa3d-75da-4642-9bce-01f57b9fe1da"). InnerVolumeSpecName "kube-api-access-6j88t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.027189 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77a9aa3d-75da-4642-9bce-01f57b9fe1da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "77a9aa3d-75da-4642-9bce-01f57b9fe1da" (UID: "77a9aa3d-75da-4642-9bce-01f57b9fe1da"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.086206 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6j88t\" (UniqueName: \"kubernetes.io/projected/77a9aa3d-75da-4642-9bce-01f57b9fe1da-kube-api-access-6j88t\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.086247 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77a9aa3d-75da-4642-9bce-01f57b9fe1da-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.086259 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77a9aa3d-75da-4642-9bce-01f57b9fe1da-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.300017 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2ct9v" event={"ID":"77a9aa3d-75da-4642-9bce-01f57b9fe1da","Type":"ContainerDied","Data":"e15f58560ebd75a0d7633f488b76c6da2a482c6969114bc0735428e883c23b84"} Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.300315 4938 scope.go:117] "RemoveContainer" containerID="ffcc8e03ffdc9af69d8a69d40f0bbdb952117d58b37e7b0ce215993f27523b1f" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.300522 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2ct9v" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.304069 4938 generic.go:334] "Generic (PLEG): container finished" podID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" containerID="88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f" exitCode=0 Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.304114 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt7q9" event={"ID":"c131c7bb-2e6f-4d78-a008-fd5ee2760542","Type":"ContainerDied","Data":"88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f"} Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.304139 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rt7q9" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.304142 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rt7q9" event={"ID":"c131c7bb-2e6f-4d78-a008-fd5ee2760542","Type":"ContainerDied","Data":"0fae1b2e78902f12f892749e88ebc254cbc42e2599559d882f91c00739fd75b5"} Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.320718 4938 scope.go:117] "RemoveContainer" containerID="651d336a89a16d4a74e19dfeb36eae088466835865543525184f6d68ebf9af19" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.336193 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2ct9v"] Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.345326 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2ct9v"] Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.352395 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rt7q9"] Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.355819 4938 scope.go:117] "RemoveContainer" containerID="75828763c1dea86081b6c937bd4693cd2bd203cb49c3dcfa2d9e3956d43864f6" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.358849 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rt7q9"] Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.372666 4938 scope.go:117] "RemoveContainer" containerID="88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.388414 4938 scope.go:117] "RemoveContainer" containerID="29f1e025cc135e47641d6c52614f6ed824d84d8f8141ad095f55b255c51a5d3b" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.408446 4938 scope.go:117] "RemoveContainer" containerID="2f01275cdd2e12445d0fc9c352773895cac66be2e1baa6d48cd1a01b5c4de418" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.455053 4938 scope.go:117] "RemoveContainer" containerID="88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f" Nov 22 11:07:47 crc kubenswrapper[4938]: E1122 11:07:47.455475 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f\": container with ID starting with 88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f not found: ID does not exist" containerID="88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.455522 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f"} err="failed to get container status \"88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f\": rpc error: code = NotFound desc = could not find container \"88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f\": container with ID starting with 88426dd6e348e389f35dd51519cabd4c48f4fd3851e5a818686c4ea30c80db4f not found: ID does not exist" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.455555 4938 scope.go:117] "RemoveContainer" containerID="29f1e025cc135e47641d6c52614f6ed824d84d8f8141ad095f55b255c51a5d3b" Nov 22 11:07:47 crc kubenswrapper[4938]: E1122 11:07:47.455892 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = 
NotFound desc = could not find container \"29f1e025cc135e47641d6c52614f6ed824d84d8f8141ad095f55b255c51a5d3b\": container with ID starting with 29f1e025cc135e47641d6c52614f6ed824d84d8f8141ad095f55b255c51a5d3b not found: ID does not exist" containerID="29f1e025cc135e47641d6c52614f6ed824d84d8f8141ad095f55b255c51a5d3b" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.455965 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29f1e025cc135e47641d6c52614f6ed824d84d8f8141ad095f55b255c51a5d3b"} err="failed to get container status \"29f1e025cc135e47641d6c52614f6ed824d84d8f8141ad095f55b255c51a5d3b\": rpc error: code = NotFound desc = could not find container \"29f1e025cc135e47641d6c52614f6ed824d84d8f8141ad095f55b255c51a5d3b\": container with ID starting with 29f1e025cc135e47641d6c52614f6ed824d84d8f8141ad095f55b255c51a5d3b not found: ID does not exist" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.456003 4938 scope.go:117] "RemoveContainer" containerID="2f01275cdd2e12445d0fc9c352773895cac66be2e1baa6d48cd1a01b5c4de418" Nov 22 11:07:47 crc kubenswrapper[4938]: E1122 11:07:47.456461 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f01275cdd2e12445d0fc9c352773895cac66be2e1baa6d48cd1a01b5c4de418\": container with ID starting with 2f01275cdd2e12445d0fc9c352773895cac66be2e1baa6d48cd1a01b5c4de418 not found: ID does not exist" containerID="2f01275cdd2e12445d0fc9c352773895cac66be2e1baa6d48cd1a01b5c4de418" Nov 22 11:07:47 crc kubenswrapper[4938]: I1122 11:07:47.456485 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f01275cdd2e12445d0fc9c352773895cac66be2e1baa6d48cd1a01b5c4de418"} err="failed to get container status \"2f01275cdd2e12445d0fc9c352773895cac66be2e1baa6d48cd1a01b5c4de418\": rpc error: code = NotFound desc = could not find container \"2f01275cdd2e12445d0fc9c352773895cac66be2e1baa6d48cd1a01b5c4de418\": container with ID starting with 2f01275cdd2e12445d0fc9c352773895cac66be2e1baa6d48cd1a01b5c4de418 not found: ID does not exist" Nov 22 11:07:48 crc kubenswrapper[4938]: I1122 11:07:48.459151 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77a9aa3d-75da-4642-9bce-01f57b9fe1da" path="/var/lib/kubelet/pods/77a9aa3d-75da-4642-9bce-01f57b9fe1da/volumes" Nov 22 11:07:48 crc kubenswrapper[4938]: I1122 11:07:48.460252 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" path="/var/lib/kubelet/pods/c131c7bb-2e6f-4d78-a008-fd5ee2760542/volumes" Nov 22 11:07:51 crc kubenswrapper[4938]: I1122 11:07:51.447622 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:07:51 crc kubenswrapper[4938]: E1122 11:07:51.448541 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:08:03 crc kubenswrapper[4938]: I1122 11:08:03.447575 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:08:03 crc kubenswrapper[4938]: E1122 
Nov 22 11:08:06 crc kubenswrapper[4938]: I1122 11:08:06.485583 4938 generic.go:334] "Generic (PLEG): container finished" podID="073859e3-9fc9-45e3-a311-34411cea1556" containerID="9644d9961e7ba55751a9422e01c0664148d69c62ef174ec555ab980ecb44b1f6" exitCode=0
Nov 22 11:08:06 crc kubenswrapper[4938]: I1122 11:08:06.485947 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" event={"ID":"073859e3-9fc9-45e3-a311-34411cea1556","Type":"ContainerDied","Data":"9644d9961e7ba55751a9422e01c0664148d69c62ef174ec555ab980ecb44b1f6"}
Nov 22 11:08:07 crc kubenswrapper[4938]: I1122 11:08:07.866563 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt"
Nov 22 11:08:07 crc kubenswrapper[4938]: I1122 11:08:07.987627 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhcf5\" (UniqueName: \"kubernetes.io/projected/073859e3-9fc9-45e3-a311-34411cea1556-kube-api-access-lhcf5\") pod \"073859e3-9fc9-45e3-a311-34411cea1556\" (UID: \"073859e3-9fc9-45e3-a311-34411cea1556\") "
Nov 22 11:08:07 crc kubenswrapper[4938]: I1122 11:08:07.987756 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/073859e3-9fc9-45e3-a311-34411cea1556-ssh-key\") pod \"073859e3-9fc9-45e3-a311-34411cea1556\" (UID: \"073859e3-9fc9-45e3-a311-34411cea1556\") "
Nov 22 11:08:07 crc kubenswrapper[4938]: I1122 11:08:07.987796 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/073859e3-9fc9-45e3-a311-34411cea1556-inventory\") pod \"073859e3-9fc9-45e3-a311-34411cea1556\" (UID: \"073859e3-9fc9-45e3-a311-34411cea1556\") "
Nov 22 11:08:07 crc kubenswrapper[4938]: I1122 11:08:07.992957 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/073859e3-9fc9-45e3-a311-34411cea1556-kube-api-access-lhcf5" (OuterVolumeSpecName: "kube-api-access-lhcf5") pod "073859e3-9fc9-45e3-a311-34411cea1556" (UID: "073859e3-9fc9-45e3-a311-34411cea1556"). InnerVolumeSpecName "kube-api-access-lhcf5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.012782 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/073859e3-9fc9-45e3-a311-34411cea1556-inventory" (OuterVolumeSpecName: "inventory") pod "073859e3-9fc9-45e3-a311-34411cea1556" (UID: "073859e3-9fc9-45e3-a311-34411cea1556"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.013144 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/073859e3-9fc9-45e3-a311-34411cea1556-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "073859e3-9fc9-45e3-a311-34411cea1556" (UID: "073859e3-9fc9-45e3-a311-34411cea1556"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.090015 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/073859e3-9fc9-45e3-a311-34411cea1556-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.090044 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/073859e3-9fc9-45e3-a311-34411cea1556-inventory\") on node \"crc\" DevicePath \"\""
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.090055 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhcf5\" (UniqueName: \"kubernetes.io/projected/073859e3-9fc9-45e3-a311-34411cea1556-kube-api-access-lhcf5\") on node \"crc\" DevicePath \"\""
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.511187 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt" event={"ID":"073859e3-9fc9-45e3-a311-34411cea1556","Type":"ContainerDied","Data":"d5377e71d760f8ccbabb08cb602c095b8c7e4094df572f3863527b560bdd2a57"}
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.511244 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d5377e71d760f8ccbabb08cb602c095b8c7e4094df572f3863527b560bdd2a57"
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.511243 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-98krt"
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.618123 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w"]
Nov 22 11:08:08 crc kubenswrapper[4938]: E1122 11:08:08.618485 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77a9aa3d-75da-4642-9bce-01f57b9fe1da" containerName="extract-content"
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.618501 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="77a9aa3d-75da-4642-9bce-01f57b9fe1da" containerName="extract-content"
Nov 22 11:08:08 crc kubenswrapper[4938]: E1122 11:08:08.618528 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" containerName="extract-content"
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.618535 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" containerName="extract-content"
Nov 22 11:08:08 crc kubenswrapper[4938]: E1122 11:08:08.618544 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77a9aa3d-75da-4642-9bce-01f57b9fe1da" containerName="extract-utilities"
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.618550 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="77a9aa3d-75da-4642-9bce-01f57b9fe1da" containerName="extract-utilities"
Nov 22 11:08:08 crc kubenswrapper[4938]: E1122 11:08:08.618572 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77a9aa3d-75da-4642-9bce-01f57b9fe1da" containerName="registry-server"
Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.618579 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="77a9aa3d-75da-4642-9bce-01f57b9fe1da" containerName="registry-server"
Nov 22 11:08:08 crc kubenswrapper[4938]: E1122 11:08:08.618592 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" containerName="extract-utilities"
podUID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" containerName="extract-utilities" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.618598 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" containerName="extract-utilities" Nov 22 11:08:08 crc kubenswrapper[4938]: E1122 11:08:08.618613 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" containerName="registry-server" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.618618 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" containerName="registry-server" Nov 22 11:08:08 crc kubenswrapper[4938]: E1122 11:08:08.618635 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="073859e3-9fc9-45e3-a311-34411cea1556" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.618641 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="073859e3-9fc9-45e3-a311-34411cea1556" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.618798 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="77a9aa3d-75da-4642-9bce-01f57b9fe1da" containerName="registry-server" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.618811 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="073859e3-9fc9-45e3-a311-34411cea1556" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.618828 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="c131c7bb-2e6f-4d78-a008-fd5ee2760542" containerName="registry-server" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.619450 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.622210 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.622417 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.622556 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.622677 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.643381 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w"] Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.699770 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8962\" (UniqueName: \"kubernetes.io/projected/66ca7351-72c9-401f-8602-f7a34033d228-kube-api-access-x8962\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w\" (UID: \"66ca7351-72c9-401f-8602-f7a34033d228\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.699817 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66ca7351-72c9-401f-8602-f7a34033d228-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w\" (UID: \"66ca7351-72c9-401f-8602-f7a34033d228\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.699973 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66ca7351-72c9-401f-8602-f7a34033d228-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w\" (UID: \"66ca7351-72c9-401f-8602-f7a34033d228\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.802029 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66ca7351-72c9-401f-8602-f7a34033d228-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w\" (UID: \"66ca7351-72c9-401f-8602-f7a34033d228\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.802090 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8962\" (UniqueName: \"kubernetes.io/projected/66ca7351-72c9-401f-8602-f7a34033d228-kube-api-access-x8962\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w\" (UID: \"66ca7351-72c9-401f-8602-f7a34033d228\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.802119 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66ca7351-72c9-401f-8602-f7a34033d228-ssh-key\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w\" (UID: \"66ca7351-72c9-401f-8602-f7a34033d228\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.807225 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66ca7351-72c9-401f-8602-f7a34033d228-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w\" (UID: \"66ca7351-72c9-401f-8602-f7a34033d228\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.818817 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66ca7351-72c9-401f-8602-f7a34033d228-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w\" (UID: \"66ca7351-72c9-401f-8602-f7a34033d228\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.825970 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8962\" (UniqueName: \"kubernetes.io/projected/66ca7351-72c9-401f-8602-f7a34033d228-kube-api-access-x8962\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w\" (UID: \"66ca7351-72c9-401f-8602-f7a34033d228\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:08:08 crc kubenswrapper[4938]: I1122 11:08:08.941838 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:08:09 crc kubenswrapper[4938]: I1122 11:08:09.443939 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w"] Nov 22 11:08:09 crc kubenswrapper[4938]: I1122 11:08:09.524380 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" event={"ID":"66ca7351-72c9-401f-8602-f7a34033d228","Type":"ContainerStarted","Data":"871a2c4babc10e73ef3afb0fc1c6917e817da5696e45ac8be33df2888ce826f3"} Nov 22 11:08:10 crc kubenswrapper[4938]: I1122 11:08:10.546099 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" event={"ID":"66ca7351-72c9-401f-8602-f7a34033d228","Type":"ContainerStarted","Data":"d37fdb9f3fb1c50c0ead1333877efe22d1af87382acbf740594e12411fec5d64"} Nov 22 11:08:10 crc kubenswrapper[4938]: I1122 11:08:10.573480 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" podStartSLOduration=2.099855733 podStartE2EDuration="2.573456526s" podCreationTimestamp="2025-11-22 11:08:08 +0000 UTC" firstStartedPulling="2025-11-22 11:08:09.45569128 +0000 UTC m=+1821.923528679" lastFinishedPulling="2025-11-22 11:08:09.929292063 +0000 UTC m=+1822.397129472" observedRunningTime="2025-11-22 11:08:10.564092633 +0000 UTC m=+1823.031930032" watchObservedRunningTime="2025-11-22 11:08:10.573456526 +0000 UTC m=+1823.041293925" Nov 22 11:08:14 crc kubenswrapper[4938]: I1122 11:08:14.454132 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:08:14 crc kubenswrapper[4938]: E1122 11:08:14.454567 4938 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:08:24 crc kubenswrapper[4938]: I1122 11:08:24.837336 4938 scope.go:117] "RemoveContainer" containerID="e134f011d7098d127ca2fd87a15ac5d118f8e48dbc13b2250eb5777ebd84b04c" Nov 22 11:08:24 crc kubenswrapper[4938]: I1122 11:08:24.889352 4938 scope.go:117] "RemoveContainer" containerID="bd3649f8c128786e6ac24b4865a8ecd81218dd97156b80e803c1410212cebcac" Nov 22 11:08:27 crc kubenswrapper[4938]: I1122 11:08:27.049278 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-vs774"] Nov 22 11:08:27 crc kubenswrapper[4938]: I1122 11:08:27.059726 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-psj8d"] Nov 22 11:08:27 crc kubenswrapper[4938]: I1122 11:08:27.071424 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-vs774"] Nov 22 11:08:27 crc kubenswrapper[4938]: I1122 11:08:27.082521 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-psj8d"] Nov 22 11:08:27 crc kubenswrapper[4938]: I1122 11:08:27.448635 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:08:27 crc kubenswrapper[4938]: E1122 11:08:27.448871 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:08:28 crc kubenswrapper[4938]: I1122 11:08:28.460510 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="973da40a-c63d-4e06-8750-c3d31d8b0abb" path="/var/lib/kubelet/pods/973da40a-c63d-4e06-8750-c3d31d8b0abb/volumes" Nov 22 11:08:28 crc kubenswrapper[4938]: I1122 11:08:28.461208 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4217bc8-0afd-4d83-9e43-41d4adf355df" path="/var/lib/kubelet/pods/e4217bc8-0afd-4d83-9e43-41d4adf355df/volumes" Nov 22 11:08:39 crc kubenswrapper[4938]: I1122 11:08:39.084203 4938 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-zg2km container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 22 11:08:39 crc kubenswrapper[4938]: I1122 11:08:39.084260 4938 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-zg2km container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 22 11:08:39 crc kubenswrapper[4938]: I1122 11:08:39.086157 4938 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" podUID="c2334157-de9a-47fc-8dd1-9388ba35334a" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 22 11:08:39 crc kubenswrapper[4938]: I1122 11:08:39.086076 4938 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zg2km" podUID="c2334157-de9a-47fc-8dd1-9388ba35334a" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 22 11:08:39 crc kubenswrapper[4938]: I1122 11:08:39.447399 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:08:39 crc kubenswrapper[4938]: E1122 11:08:39.447708 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:08:51 crc kubenswrapper[4938]: I1122 11:08:51.447372 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:08:51 crc kubenswrapper[4938]: I1122 11:08:51.900720 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"354b7cee0faf75c6141560f2e8431b8d74b396591ebde1b2d4ea604134123035"} Nov 22 11:08:54 crc kubenswrapper[4938]: I1122 11:08:54.038693 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-p9nkx"] Nov 22 11:08:54 crc kubenswrapper[4938]: I1122 11:08:54.048498 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-p9nkx"] Nov 22 11:08:54 crc kubenswrapper[4938]: I1122 11:08:54.457878 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c795cb12-352a-40bf-b828-4c4d16472eea" path="/var/lib/kubelet/pods/c795cb12-352a-40bf-b828-4c4d16472eea/volumes" Nov 22 11:08:56 crc kubenswrapper[4938]: I1122 11:08:56.033301 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-ddndr"] Nov 22 11:08:56 crc kubenswrapper[4938]: I1122 11:08:56.041230 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-ddndr"] Nov 22 11:08:56 crc kubenswrapper[4938]: I1122 11:08:56.459534 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="745ffa75-881b-4c0d-8f61-70d872617409" path="/var/lib/kubelet/pods/745ffa75-881b-4c0d-8f61-70d872617409/volumes" Nov 22 11:08:58 crc kubenswrapper[4938]: I1122 11:08:58.030472 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-qd8nf"] Nov 22 11:08:58 crc kubenswrapper[4938]: I1122 11:08:58.037316 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-qd8nf"] Nov 22 11:08:58 crc kubenswrapper[4938]: I1122 11:08:58.459385 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146" path="/var/lib/kubelet/pods/0ea4e6f6-fcbc-4a2c-9d13-d9be6762d146/volumes" Nov 22 11:09:23 crc kubenswrapper[4938]: I1122 11:09:23.177961 4938 generic.go:334] "Generic (PLEG): container finished" podID="66ca7351-72c9-401f-8602-f7a34033d228" containerID="d37fdb9f3fb1c50c0ead1333877efe22d1af87382acbf740594e12411fec5d64" exitCode=0 Nov 22 11:09:23 crc kubenswrapper[4938]: I1122 11:09:23.178053 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" event={"ID":"66ca7351-72c9-401f-8602-f7a34033d228","Type":"ContainerDied","Data":"d37fdb9f3fb1c50c0ead1333877efe22d1af87382acbf740594e12411fec5d64"} Nov 22 11:09:24 crc kubenswrapper[4938]: I1122 11:09:24.603247 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:09:24 crc kubenswrapper[4938]: I1122 11:09:24.678976 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66ca7351-72c9-401f-8602-f7a34033d228-inventory\") pod \"66ca7351-72c9-401f-8602-f7a34033d228\" (UID: \"66ca7351-72c9-401f-8602-f7a34033d228\") " Nov 22 11:09:24 crc kubenswrapper[4938]: I1122 11:09:24.679246 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8962\" (UniqueName: \"kubernetes.io/projected/66ca7351-72c9-401f-8602-f7a34033d228-kube-api-access-x8962\") pod \"66ca7351-72c9-401f-8602-f7a34033d228\" (UID: \"66ca7351-72c9-401f-8602-f7a34033d228\") " Nov 22 11:09:24 crc kubenswrapper[4938]: I1122 11:09:24.679322 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66ca7351-72c9-401f-8602-f7a34033d228-ssh-key\") pod \"66ca7351-72c9-401f-8602-f7a34033d228\" (UID: \"66ca7351-72c9-401f-8602-f7a34033d228\") " Nov 22 11:09:24 crc kubenswrapper[4938]: I1122 11:09:24.684652 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66ca7351-72c9-401f-8602-f7a34033d228-kube-api-access-x8962" (OuterVolumeSpecName: "kube-api-access-x8962") pod "66ca7351-72c9-401f-8602-f7a34033d228" (UID: "66ca7351-72c9-401f-8602-f7a34033d228"). InnerVolumeSpecName "kube-api-access-x8962". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:09:24 crc kubenswrapper[4938]: I1122 11:09:24.707868 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66ca7351-72c9-401f-8602-f7a34033d228-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "66ca7351-72c9-401f-8602-f7a34033d228" (UID: "66ca7351-72c9-401f-8602-f7a34033d228"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:09:24 crc kubenswrapper[4938]: I1122 11:09:24.710233 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66ca7351-72c9-401f-8602-f7a34033d228-inventory" (OuterVolumeSpecName: "inventory") pod "66ca7351-72c9-401f-8602-f7a34033d228" (UID: "66ca7351-72c9-401f-8602-f7a34033d228"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:09:24 crc kubenswrapper[4938]: I1122 11:09:24.782983 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66ca7351-72c9-401f-8602-f7a34033d228-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:24 crc kubenswrapper[4938]: I1122 11:09:24.783027 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8962\" (UniqueName: \"kubernetes.io/projected/66ca7351-72c9-401f-8602-f7a34033d228-kube-api-access-x8962\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:24 crc kubenswrapper[4938]: I1122 11:09:24.783040 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66ca7351-72c9-401f-8602-f7a34033d228-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.004040 4938 scope.go:117] "RemoveContainer" containerID="3a412995d5c50d3471d0cd86652e95fc96f87a1488096ee3caf8f67ee195d9a5" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.048116 4938 scope.go:117] "RemoveContainer" containerID="3ecdb9a9624006bc482603fda8692c2a9a85fcc542d3c6c18df6662201098e61" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.073585 4938 scope.go:117] "RemoveContainer" containerID="8bac59801cf21b8548fe8306bf06f75f3d14cd3e9f59ae1887aa1b45baada354" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.109805 4938 scope.go:117] "RemoveContainer" containerID="9dcadf332c4dbc3fe26ae6f5741400c64b9a756792a955219ca1d3c0a9336484" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.156951 4938 scope.go:117] "RemoveContainer" containerID="0a71de05781e1589ff17fce11d007b4f1ded630bc326b0b75541472e80c3f670" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.198662 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" event={"ID":"66ca7351-72c9-401f-8602-f7a34033d228","Type":"ContainerDied","Data":"871a2c4babc10e73ef3afb0fc1c6917e817da5696e45ac8be33df2888ce826f3"} Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.198699 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="871a2c4babc10e73ef3afb0fc1c6917e817da5696e45ac8be33df2888ce826f3" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.198758 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.280420 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c"] Nov 22 11:09:25 crc kubenswrapper[4938]: E1122 11:09:25.280866 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66ca7351-72c9-401f-8602-f7a34033d228" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.280887 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="66ca7351-72c9-401f-8602-f7a34033d228" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.281134 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="66ca7351-72c9-401f-8602-f7a34033d228" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.281968 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.285940 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.286174 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.286209 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.288472 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c"] Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.308457 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.392037 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/595a1412-b3d2-40ba-bb26-98cd27d79480-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-46d5c\" (UID: \"595a1412-b3d2-40ba-bb26-98cd27d79480\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.392154 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/595a1412-b3d2-40ba-bb26-98cd27d79480-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-46d5c\" (UID: \"595a1412-b3d2-40ba-bb26-98cd27d79480\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.392287 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlzkn\" (UniqueName: \"kubernetes.io/projected/595a1412-b3d2-40ba-bb26-98cd27d79480-kube-api-access-qlzkn\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-46d5c\" (UID: \"595a1412-b3d2-40ba-bb26-98cd27d79480\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.493667 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlzkn\" (UniqueName: \"kubernetes.io/projected/595a1412-b3d2-40ba-bb26-98cd27d79480-kube-api-access-qlzkn\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-46d5c\" (UID: \"595a1412-b3d2-40ba-bb26-98cd27d79480\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.493728 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/595a1412-b3d2-40ba-bb26-98cd27d79480-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-46d5c\" (UID: \"595a1412-b3d2-40ba-bb26-98cd27d79480\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.493822 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/595a1412-b3d2-40ba-bb26-98cd27d79480-ssh-key\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-46d5c\" (UID: \"595a1412-b3d2-40ba-bb26-98cd27d79480\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.497852 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/595a1412-b3d2-40ba-bb26-98cd27d79480-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-46d5c\" (UID: \"595a1412-b3d2-40ba-bb26-98cd27d79480\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.497900 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/595a1412-b3d2-40ba-bb26-98cd27d79480-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-46d5c\" (UID: \"595a1412-b3d2-40ba-bb26-98cd27d79480\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.511734 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlzkn\" (UniqueName: \"kubernetes.io/projected/595a1412-b3d2-40ba-bb26-98cd27d79480-kube-api-access-qlzkn\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-46d5c\" (UID: \"595a1412-b3d2-40ba-bb26-98cd27d79480\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:25 crc kubenswrapper[4938]: I1122 11:09:25.628438 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:26 crc kubenswrapper[4938]: I1122 11:09:26.116551 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c"] Nov 22 11:09:26 crc kubenswrapper[4938]: I1122 11:09:26.212372 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" event={"ID":"595a1412-b3d2-40ba-bb26-98cd27d79480","Type":"ContainerStarted","Data":"03a373a31897e3d2b9dc768da6693b3015732ab6ca0bb125a597dab0028ec54a"} Nov 22 11:09:27 crc kubenswrapper[4938]: I1122 11:09:27.224700 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" event={"ID":"595a1412-b3d2-40ba-bb26-98cd27d79480","Type":"ContainerStarted","Data":"6737177c32a9844fb00e0adbf5ed34e8d13ac045ea75596f3d6f025e1efef6c8"} Nov 22 11:09:27 crc kubenswrapper[4938]: I1122 11:09:27.248624 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" podStartSLOduration=1.8178725839999998 podStartE2EDuration="2.248604821s" podCreationTimestamp="2025-11-22 11:09:25 +0000 UTC" firstStartedPulling="2025-11-22 11:09:26.126778474 +0000 UTC m=+1898.594615863" lastFinishedPulling="2025-11-22 11:09:26.557510701 +0000 UTC m=+1899.025348100" observedRunningTime="2025-11-22 11:09:27.242166261 +0000 UTC m=+1899.710003660" watchObservedRunningTime="2025-11-22 11:09:27.248604821 +0000 UTC m=+1899.716442220" Nov 22 11:09:29 crc kubenswrapper[4938]: I1122 11:09:29.037740 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-7lllj"] Nov 22 11:09:29 crc kubenswrapper[4938]: I1122 11:09:29.046227 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-api-db-create-j5hgn"] Nov 22 11:09:29 crc kubenswrapper[4938]: I1122 11:09:29.053395 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-v28zg"] Nov 22 11:09:29 crc kubenswrapper[4938]: I1122 11:09:29.061104 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-7lllj"] Nov 22 11:09:29 crc kubenswrapper[4938]: I1122 11:09:29.068636 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-j5hgn"] Nov 22 11:09:29 crc kubenswrapper[4938]: I1122 11:09:29.075386 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-v28zg"] Nov 22 11:09:30 crc kubenswrapper[4938]: I1122 11:09:30.456637 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8481f6ff-a8d4-40ea-80b0-1076f6b60c61" path="/var/lib/kubelet/pods/8481f6ff-a8d4-40ea-80b0-1076f6b60c61/volumes" Nov 22 11:09:30 crc kubenswrapper[4938]: I1122 11:09:30.457625 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8cdfce8-5355-49f9-997d-04cac912ca12" path="/var/lib/kubelet/pods/a8cdfce8-5355-49f9-997d-04cac912ca12/volumes" Nov 22 11:09:30 crc kubenswrapper[4938]: I1122 11:09:30.458109 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dea066c0-8c10-4c73-8f35-4ad99c4fd251" path="/var/lib/kubelet/pods/dea066c0-8c10-4c73-8f35-4ad99c4fd251/volumes" Nov 22 11:09:32 crc kubenswrapper[4938]: I1122 11:09:32.271228 4938 generic.go:334] "Generic (PLEG): container finished" podID="595a1412-b3d2-40ba-bb26-98cd27d79480" containerID="6737177c32a9844fb00e0adbf5ed34e8d13ac045ea75596f3d6f025e1efef6c8" exitCode=0 Nov 22 11:09:32 crc kubenswrapper[4938]: I1122 11:09:32.271318 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" event={"ID":"595a1412-b3d2-40ba-bb26-98cd27d79480","Type":"ContainerDied","Data":"6737177c32a9844fb00e0adbf5ed34e8d13ac045ea75596f3d6f025e1efef6c8"} Nov 22 11:09:33 crc kubenswrapper[4938]: I1122 11:09:33.659011 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:33 crc kubenswrapper[4938]: I1122 11:09:33.768750 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/595a1412-b3d2-40ba-bb26-98cd27d79480-inventory\") pod \"595a1412-b3d2-40ba-bb26-98cd27d79480\" (UID: \"595a1412-b3d2-40ba-bb26-98cd27d79480\") " Nov 22 11:09:33 crc kubenswrapper[4938]: I1122 11:09:33.768856 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/595a1412-b3d2-40ba-bb26-98cd27d79480-ssh-key\") pod \"595a1412-b3d2-40ba-bb26-98cd27d79480\" (UID: \"595a1412-b3d2-40ba-bb26-98cd27d79480\") " Nov 22 11:09:33 crc kubenswrapper[4938]: I1122 11:09:33.768930 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qlzkn\" (UniqueName: \"kubernetes.io/projected/595a1412-b3d2-40ba-bb26-98cd27d79480-kube-api-access-qlzkn\") pod \"595a1412-b3d2-40ba-bb26-98cd27d79480\" (UID: \"595a1412-b3d2-40ba-bb26-98cd27d79480\") " Nov 22 11:09:33 crc kubenswrapper[4938]: I1122 11:09:33.775396 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/595a1412-b3d2-40ba-bb26-98cd27d79480-kube-api-access-qlzkn" (OuterVolumeSpecName: "kube-api-access-qlzkn") pod "595a1412-b3d2-40ba-bb26-98cd27d79480" (UID: "595a1412-b3d2-40ba-bb26-98cd27d79480"). InnerVolumeSpecName "kube-api-access-qlzkn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:09:33 crc kubenswrapper[4938]: I1122 11:09:33.796185 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/595a1412-b3d2-40ba-bb26-98cd27d79480-inventory" (OuterVolumeSpecName: "inventory") pod "595a1412-b3d2-40ba-bb26-98cd27d79480" (UID: "595a1412-b3d2-40ba-bb26-98cd27d79480"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:09:33 crc kubenswrapper[4938]: I1122 11:09:33.805075 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/595a1412-b3d2-40ba-bb26-98cd27d79480-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "595a1412-b3d2-40ba-bb26-98cd27d79480" (UID: "595a1412-b3d2-40ba-bb26-98cd27d79480"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:09:33 crc kubenswrapper[4938]: I1122 11:09:33.871774 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/595a1412-b3d2-40ba-bb26-98cd27d79480-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:33 crc kubenswrapper[4938]: I1122 11:09:33.871817 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/595a1412-b3d2-40ba-bb26-98cd27d79480-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:33 crc kubenswrapper[4938]: I1122 11:09:33.871832 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qlzkn\" (UniqueName: \"kubernetes.io/projected/595a1412-b3d2-40ba-bb26-98cd27d79480-kube-api-access-qlzkn\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.287668 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" event={"ID":"595a1412-b3d2-40ba-bb26-98cd27d79480","Type":"ContainerDied","Data":"03a373a31897e3d2b9dc768da6693b3015732ab6ca0bb125a597dab0028ec54a"} Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.288114 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03a373a31897e3d2b9dc768da6693b3015732ab6ca0bb125a597dab0028ec54a" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.287794 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-46d5c" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.373590 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5"] Nov 22 11:09:34 crc kubenswrapper[4938]: E1122 11:09:34.374099 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="595a1412-b3d2-40ba-bb26-98cd27d79480" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.374136 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="595a1412-b3d2-40ba-bb26-98cd27d79480" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.374414 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="595a1412-b3d2-40ba-bb26-98cd27d79480" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.375237 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.378491 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.378891 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.379140 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.379275 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.384212 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5"] Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.484647 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-255m5\" (UID: \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.484892 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-255m5\" (UID: \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.484982 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpc9m\" (UniqueName: \"kubernetes.io/projected/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-kube-api-access-bpc9m\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-255m5\" (UID: \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.588109 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-255m5\" (UID: \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.588206 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpc9m\" (UniqueName: \"kubernetes.io/projected/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-kube-api-access-bpc9m\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-255m5\" (UID: \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.588341 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-255m5\" (UID: 
\"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.594344 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-255m5\" (UID: \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.600690 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-255m5\" (UID: \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.605174 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpc9m\" (UniqueName: \"kubernetes.io/projected/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-kube-api-access-bpc9m\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-255m5\" (UID: \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:09:34 crc kubenswrapper[4938]: I1122 11:09:34.702985 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:09:35 crc kubenswrapper[4938]: I1122 11:09:35.394769 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5"] Nov 22 11:09:35 crc kubenswrapper[4938]: W1122 11:09:35.399189 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod578cbfd4_2f90_4d71_ac4f_ccdb9f00629f.slice/crio-36928a58ce6158d1ffa15cfb87d7605ce6969b7c8ba1ed26213e101dcd36cb9b WatchSource:0}: Error finding container 36928a58ce6158d1ffa15cfb87d7605ce6969b7c8ba1ed26213e101dcd36cb9b: Status 404 returned error can't find the container with id 36928a58ce6158d1ffa15cfb87d7605ce6969b7c8ba1ed26213e101dcd36cb9b Nov 22 11:09:36 crc kubenswrapper[4938]: I1122 11:09:36.304607 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" event={"ID":"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f","Type":"ContainerStarted","Data":"aa3ba63a1c4ba6e5e37c21e78302c482a7e9652855570abff1791b3a0f796a7c"} Nov 22 11:09:36 crc kubenswrapper[4938]: I1122 11:09:36.305178 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" event={"ID":"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f","Type":"ContainerStarted","Data":"36928a58ce6158d1ffa15cfb87d7605ce6969b7c8ba1ed26213e101dcd36cb9b"} Nov 22 11:09:36 crc kubenswrapper[4938]: I1122 11:09:36.323881 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" podStartSLOduration=1.936957772 podStartE2EDuration="2.3238644s" podCreationTimestamp="2025-11-22 11:09:34 +0000 UTC" firstStartedPulling="2025-11-22 11:09:35.401624814 +0000 UTC m=+1907.869462203" lastFinishedPulling="2025-11-22 11:09:35.788531432 +0000 UTC m=+1908.256368831" observedRunningTime="2025-11-22 11:09:36.320173008 +0000 UTC 
m=+1908.788010407" watchObservedRunningTime="2025-11-22 11:09:36.3238644 +0000 UTC m=+1908.791701819" Nov 22 11:09:39 crc kubenswrapper[4938]: I1122 11:09:39.031801 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-b693-account-create-rgkl7"] Nov 22 11:09:39 crc kubenswrapper[4938]: I1122 11:09:39.042902 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-b693-account-create-rgkl7"] Nov 22 11:09:39 crc kubenswrapper[4938]: I1122 11:09:39.054514 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-3f44-account-create-vhdtx"] Nov 22 11:09:39 crc kubenswrapper[4938]: I1122 11:09:39.065703 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-3f44-account-create-vhdtx"] Nov 22 11:09:40 crc kubenswrapper[4938]: I1122 11:09:40.050497 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-3e6e-account-create-wpvvp"] Nov 22 11:09:40 crc kubenswrapper[4938]: I1122 11:09:40.062666 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-3e6e-account-create-wpvvp"] Nov 22 11:09:40 crc kubenswrapper[4938]: I1122 11:09:40.458989 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91833636-6232-43e0-96e4-13ab0be6dbbe" path="/var/lib/kubelet/pods/91833636-6232-43e0-96e4-13ab0be6dbbe/volumes" Nov 22 11:09:40 crc kubenswrapper[4938]: I1122 11:09:40.459688 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d38d750-11b4-4d05-a62b-86eb84910f96" path="/var/lib/kubelet/pods/9d38d750-11b4-4d05-a62b-86eb84910f96/volumes" Nov 22 11:09:40 crc kubenswrapper[4938]: I1122 11:09:40.460157 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fca19b3c-65ae-4c37-99c6-bc28789b64fe" path="/var/lib/kubelet/pods/fca19b3c-65ae-4c37-99c6-bc28789b64fe/volumes" Nov 22 11:10:05 crc kubenswrapper[4938]: I1122 11:10:05.047566 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-n264b"] Nov 22 11:10:05 crc kubenswrapper[4938]: I1122 11:10:05.057557 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-n264b"] Nov 22 11:10:06 crc kubenswrapper[4938]: I1122 11:10:06.458364 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ec84446-c040-420f-bcb7-cfb23ec96eb9" path="/var/lib/kubelet/pods/9ec84446-c040-420f-bcb7-cfb23ec96eb9/volumes" Nov 22 11:10:11 crc kubenswrapper[4938]: I1122 11:10:11.605027 4938 generic.go:334] "Generic (PLEG): container finished" podID="578cbfd4-2f90-4d71-ac4f-ccdb9f00629f" containerID="aa3ba63a1c4ba6e5e37c21e78302c482a7e9652855570abff1791b3a0f796a7c" exitCode=0 Nov 22 11:10:11 crc kubenswrapper[4938]: I1122 11:10:11.605731 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" event={"ID":"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f","Type":"ContainerDied","Data":"aa3ba63a1c4ba6e5e37c21e78302c482a7e9652855570abff1791b3a0f796a7c"} Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.041138 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.138786 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-inventory\") pod \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\" (UID: \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\") " Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.139504 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpc9m\" (UniqueName: \"kubernetes.io/projected/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-kube-api-access-bpc9m\") pod \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\" (UID: \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\") " Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.139656 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-ssh-key\") pod \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\" (UID: \"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f\") " Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.154194 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-kube-api-access-bpc9m" (OuterVolumeSpecName: "kube-api-access-bpc9m") pod "578cbfd4-2f90-4d71-ac4f-ccdb9f00629f" (UID: "578cbfd4-2f90-4d71-ac4f-ccdb9f00629f"). InnerVolumeSpecName "kube-api-access-bpc9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.166776 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-inventory" (OuterVolumeSpecName: "inventory") pod "578cbfd4-2f90-4d71-ac4f-ccdb9f00629f" (UID: "578cbfd4-2f90-4d71-ac4f-ccdb9f00629f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.166813 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "578cbfd4-2f90-4d71-ac4f-ccdb9f00629f" (UID: "578cbfd4-2f90-4d71-ac4f-ccdb9f00629f"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.242365 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bpc9m\" (UniqueName: \"kubernetes.io/projected/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-kube-api-access-bpc9m\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.242431 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.242442 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/578cbfd4-2f90-4d71-ac4f-ccdb9f00629f-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.625017 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" event={"ID":"578cbfd4-2f90-4d71-ac4f-ccdb9f00629f","Type":"ContainerDied","Data":"36928a58ce6158d1ffa15cfb87d7605ce6969b7c8ba1ed26213e101dcd36cb9b"} Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.625076 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36928a58ce6158d1ffa15cfb87d7605ce6969b7c8ba1ed26213e101dcd36cb9b" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.625094 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-255m5" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.695717 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz"] Nov 22 11:10:13 crc kubenswrapper[4938]: E1122 11:10:13.696131 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="578cbfd4-2f90-4d71-ac4f-ccdb9f00629f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.696149 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="578cbfd4-2f90-4d71-ac4f-ccdb9f00629f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.696372 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="578cbfd4-2f90-4d71-ac4f-ccdb9f00629f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.697031 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.699069 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.699100 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.699108 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.702338 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.721284 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz"] Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.750745 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz\" (UID: \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.750803 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9j6c\" (UniqueName: \"kubernetes.io/projected/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-kube-api-access-k9j6c\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz\" (UID: \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.750883 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz\" (UID: \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.852701 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz\" (UID: \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.852758 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9j6c\" (UniqueName: \"kubernetes.io/projected/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-kube-api-access-k9j6c\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz\" (UID: \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.852803 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz\" 
(UID: \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.856374 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz\" (UID: \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.857564 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz\" (UID: \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:10:13 crc kubenswrapper[4938]: I1122 11:10:13.870319 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9j6c\" (UniqueName: \"kubernetes.io/projected/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-kube-api-access-k9j6c\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz\" (UID: \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:10:14 crc kubenswrapper[4938]: I1122 11:10:14.018448 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:10:14 crc kubenswrapper[4938]: I1122 11:10:14.501497 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz"] Nov 22 11:10:14 crc kubenswrapper[4938]: I1122 11:10:14.633586 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" event={"ID":"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296","Type":"ContainerStarted","Data":"78d53463905ed699b804c1647296321ac1e333d3acceec2bc8abb7e5839598fd"} Nov 22 11:10:15 crc kubenswrapper[4938]: I1122 11:10:15.646853 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" event={"ID":"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296","Type":"ContainerStarted","Data":"9fc2a6c36374b7754bff250eb172240b892a246820a9c3a8cdbded505807cd30"} Nov 22 11:10:15 crc kubenswrapper[4938]: I1122 11:10:15.691199 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" podStartSLOduration=2.264998227 podStartE2EDuration="2.69117389s" podCreationTimestamp="2025-11-22 11:10:13 +0000 UTC" firstStartedPulling="2025-11-22 11:10:14.520058369 +0000 UTC m=+1946.987895778" lastFinishedPulling="2025-11-22 11:10:14.946234042 +0000 UTC m=+1947.414071441" observedRunningTime="2025-11-22 11:10:15.681481987 +0000 UTC m=+1948.149319386" watchObservedRunningTime="2025-11-22 11:10:15.69117389 +0000 UTC m=+1948.159011309" Nov 22 11:10:25 crc kubenswrapper[4938]: I1122 11:10:25.346407 4938 scope.go:117] "RemoveContainer" containerID="50c5a7157c9ddd89ba8992b92c14bbfa469242b74f0bb1910e95a8299f64e21d" Nov 22 11:10:25 crc kubenswrapper[4938]: I1122 11:10:25.399730 4938 scope.go:117] "RemoveContainer" containerID="352b329b5028da50f16f1ba670b49148c2ecbbb419fab12396c5a995adf7f81d" Nov 22 11:10:25 crc kubenswrapper[4938]: I1122 
11:10:25.434994 4938 scope.go:117] "RemoveContainer" containerID="4f4d97eda9b4f8c52b41dd2d7497364cf732b981472e439d69e60419c80776f8" Nov 22 11:10:25 crc kubenswrapper[4938]: I1122 11:10:25.484026 4938 scope.go:117] "RemoveContainer" containerID="ebb74c875b26fd6b538c39832800c7a343ddb88cc06ca0ab9785e05d5e8800f4" Nov 22 11:10:25 crc kubenswrapper[4938]: I1122 11:10:25.530891 4938 scope.go:117] "RemoveContainer" containerID="a3804884fcea569815fe4083170fa6ea6f9a2c8270bcbd6dfb14a40a296af1d2" Nov 22 11:10:25 crc kubenswrapper[4938]: I1122 11:10:25.569015 4938 scope.go:117] "RemoveContainer" containerID="05901d070c3e91e022199beccc8942e050f0e702038e1b0c3b7d37c1d300c860" Nov 22 11:10:25 crc kubenswrapper[4938]: I1122 11:10:25.613598 4938 scope.go:117] "RemoveContainer" containerID="b7ed6d27b471bd500f9c6013c674caf20708f72a3265ed9b7f1794d10886d0bd" Nov 22 11:10:30 crc kubenswrapper[4938]: I1122 11:10:30.037286 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-5mrph"] Nov 22 11:10:30 crc kubenswrapper[4938]: I1122 11:10:30.044132 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-5mrph"] Nov 22 11:10:30 crc kubenswrapper[4938]: I1122 11:10:30.458740 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e" path="/var/lib/kubelet/pods/7bd64b06-984f-45a0-a6f0-2ab6a6d2cf9e/volumes" Nov 22 11:10:34 crc kubenswrapper[4938]: I1122 11:10:34.025689 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-dvwrn"] Nov 22 11:10:34 crc kubenswrapper[4938]: I1122 11:10:34.034333 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-dvwrn"] Nov 22 11:10:34 crc kubenswrapper[4938]: I1122 11:10:34.457399 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b51f59c0-79bd-43ee-b657-1e2aa209c3af" path="/var/lib/kubelet/pods/b51f59c0-79bd-43ee-b657-1e2aa209c3af/volumes" Nov 22 11:11:01 crc kubenswrapper[4938]: I1122 11:11:01.097052 4938 generic.go:334] "Generic (PLEG): container finished" podID="85b362e8-25b1-4ed4-8c6c-8fdb1c84e296" containerID="9fc2a6c36374b7754bff250eb172240b892a246820a9c3a8cdbded505807cd30" exitCode=0 Nov 22 11:11:01 crc kubenswrapper[4938]: I1122 11:11:01.097146 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" event={"ID":"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296","Type":"ContainerDied","Data":"9fc2a6c36374b7754bff250eb172240b892a246820a9c3a8cdbded505807cd30"} Nov 22 11:11:02 crc kubenswrapper[4938]: I1122 11:11:02.511274 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:11:02 crc kubenswrapper[4938]: I1122 11:11:02.653494 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-ssh-key\") pod \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\" (UID: \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\") " Nov 22 11:11:02 crc kubenswrapper[4938]: I1122 11:11:02.653561 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-inventory\") pod \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\" (UID: \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\") " Nov 22 11:11:02 crc kubenswrapper[4938]: I1122 11:11:02.653766 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9j6c\" (UniqueName: \"kubernetes.io/projected/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-kube-api-access-k9j6c\") pod \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\" (UID: \"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296\") " Nov 22 11:11:02 crc kubenswrapper[4938]: I1122 11:11:02.658633 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-kube-api-access-k9j6c" (OuterVolumeSpecName: "kube-api-access-k9j6c") pod "85b362e8-25b1-4ed4-8c6c-8fdb1c84e296" (UID: "85b362e8-25b1-4ed4-8c6c-8fdb1c84e296"). InnerVolumeSpecName "kube-api-access-k9j6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:11:02 crc kubenswrapper[4938]: I1122 11:11:02.678700 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-inventory" (OuterVolumeSpecName: "inventory") pod "85b362e8-25b1-4ed4-8c6c-8fdb1c84e296" (UID: "85b362e8-25b1-4ed4-8c6c-8fdb1c84e296"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:11:02 crc kubenswrapper[4938]: I1122 11:11:02.679485 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "85b362e8-25b1-4ed4-8c6c-8fdb1c84e296" (UID: "85b362e8-25b1-4ed4-8c6c-8fdb1c84e296"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:11:02 crc kubenswrapper[4938]: I1122 11:11:02.755742 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9j6c\" (UniqueName: \"kubernetes.io/projected/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-kube-api-access-k9j6c\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:02 crc kubenswrapper[4938]: I1122 11:11:02.755771 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:02 crc kubenswrapper[4938]: I1122 11:11:02.755781 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/85b362e8-25b1-4ed4-8c6c-8fdb1c84e296-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.113430 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" event={"ID":"85b362e8-25b1-4ed4-8c6c-8fdb1c84e296","Type":"ContainerDied","Data":"78d53463905ed699b804c1647296321ac1e333d3acceec2bc8abb7e5839598fd"} Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.113474 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78d53463905ed699b804c1647296321ac1e333d3acceec2bc8abb7e5839598fd" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.113537 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.194953 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-9895f"] Nov 22 11:11:03 crc kubenswrapper[4938]: E1122 11:11:03.195340 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85b362e8-25b1-4ed4-8c6c-8fdb1c84e296" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.195364 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="85b362e8-25b1-4ed4-8c6c-8fdb1c84e296" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.195592 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="85b362e8-25b1-4ed4-8c6c-8fdb1c84e296" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.196250 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.202563 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.203100 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.203148 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.203253 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.214282 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-9895f"] Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.265388 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a66a985f-423c-4438-b9cc-ad5cbc582077-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-9895f\" (UID: \"a66a985f-423c-4438-b9cc-ad5cbc582077\") " pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.265502 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a66a985f-423c-4438-b9cc-ad5cbc582077-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-9895f\" (UID: \"a66a985f-423c-4438-b9cc-ad5cbc582077\") " pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.265528 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfj9f\" (UniqueName: \"kubernetes.io/projected/a66a985f-423c-4438-b9cc-ad5cbc582077-kube-api-access-kfj9f\") pod \"ssh-known-hosts-edpm-deployment-9895f\" (UID: \"a66a985f-423c-4438-b9cc-ad5cbc582077\") " pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.366980 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a66a985f-423c-4438-b9cc-ad5cbc582077-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-9895f\" (UID: \"a66a985f-423c-4438-b9cc-ad5cbc582077\") " pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.367138 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a66a985f-423c-4438-b9cc-ad5cbc582077-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-9895f\" (UID: \"a66a985f-423c-4438-b9cc-ad5cbc582077\") " pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.367181 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfj9f\" (UniqueName: \"kubernetes.io/projected/a66a985f-423c-4438-b9cc-ad5cbc582077-kube-api-access-kfj9f\") pod \"ssh-known-hosts-edpm-deployment-9895f\" (UID: \"a66a985f-423c-4438-b9cc-ad5cbc582077\") " pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:03 crc 
kubenswrapper[4938]: I1122 11:11:03.374697 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a66a985f-423c-4438-b9cc-ad5cbc582077-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-9895f\" (UID: \"a66a985f-423c-4438-b9cc-ad5cbc582077\") " pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.374783 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a66a985f-423c-4438-b9cc-ad5cbc582077-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-9895f\" (UID: \"a66a985f-423c-4438-b9cc-ad5cbc582077\") " pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.386595 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfj9f\" (UniqueName: \"kubernetes.io/projected/a66a985f-423c-4438-b9cc-ad5cbc582077-kube-api-access-kfj9f\") pod \"ssh-known-hosts-edpm-deployment-9895f\" (UID: \"a66a985f-423c-4438-b9cc-ad5cbc582077\") " pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:03 crc kubenswrapper[4938]: I1122 11:11:03.516116 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:04 crc kubenswrapper[4938]: I1122 11:11:04.028037 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-9895f"] Nov 22 11:11:04 crc kubenswrapper[4938]: I1122 11:11:04.135054 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-9895f" event={"ID":"a66a985f-423c-4438-b9cc-ad5cbc582077","Type":"ContainerStarted","Data":"3f0c3d2866853f6657ac08ef79d0b022d2028e4912c51797a69c5e3deefc97f0"} Nov 22 11:11:05 crc kubenswrapper[4938]: I1122 11:11:05.143764 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-9895f" event={"ID":"a66a985f-423c-4438-b9cc-ad5cbc582077","Type":"ContainerStarted","Data":"147ac0ff15fcc1cf5b776e9ee90c43492794169e6034533df5afe00a12361d27"} Nov 22 11:11:05 crc kubenswrapper[4938]: I1122 11:11:05.160719 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-9895f" podStartSLOduration=1.799334055 podStartE2EDuration="2.160702575s" podCreationTimestamp="2025-11-22 11:11:03 +0000 UTC" firstStartedPulling="2025-11-22 11:11:04.039820843 +0000 UTC m=+1996.507658242" lastFinishedPulling="2025-11-22 11:11:04.401189363 +0000 UTC m=+1996.869026762" observedRunningTime="2025-11-22 11:11:05.158195093 +0000 UTC m=+1997.626032492" watchObservedRunningTime="2025-11-22 11:11:05.160702575 +0000 UTC m=+1997.628539974" Nov 22 11:11:11 crc kubenswrapper[4938]: I1122 11:11:11.192410 4938 generic.go:334] "Generic (PLEG): container finished" podID="a66a985f-423c-4438-b9cc-ad5cbc582077" containerID="147ac0ff15fcc1cf5b776e9ee90c43492794169e6034533df5afe00a12361d27" exitCode=0 Nov 22 11:11:11 crc kubenswrapper[4938]: I1122 11:11:11.192514 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-9895f" event={"ID":"a66a985f-423c-4438-b9cc-ad5cbc582077","Type":"ContainerDied","Data":"147ac0ff15fcc1cf5b776e9ee90c43492794169e6034533df5afe00a12361d27"} Nov 22 11:11:11 crc kubenswrapper[4938]: I1122 11:11:11.300368 4938 patch_prober.go:28] interesting 
pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:11:11 crc kubenswrapper[4938]: I1122 11:11:11.300438 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:11:12 crc kubenswrapper[4938]: I1122 11:11:12.584681 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:12 crc kubenswrapper[4938]: I1122 11:11:12.734878 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfj9f\" (UniqueName: \"kubernetes.io/projected/a66a985f-423c-4438-b9cc-ad5cbc582077-kube-api-access-kfj9f\") pod \"a66a985f-423c-4438-b9cc-ad5cbc582077\" (UID: \"a66a985f-423c-4438-b9cc-ad5cbc582077\") " Nov 22 11:11:12 crc kubenswrapper[4938]: I1122 11:11:12.735196 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a66a985f-423c-4438-b9cc-ad5cbc582077-inventory-0\") pod \"a66a985f-423c-4438-b9cc-ad5cbc582077\" (UID: \"a66a985f-423c-4438-b9cc-ad5cbc582077\") " Nov 22 11:11:12 crc kubenswrapper[4938]: I1122 11:11:12.735352 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a66a985f-423c-4438-b9cc-ad5cbc582077-ssh-key-openstack-edpm-ipam\") pod \"a66a985f-423c-4438-b9cc-ad5cbc582077\" (UID: \"a66a985f-423c-4438-b9cc-ad5cbc582077\") " Nov 22 11:11:12 crc kubenswrapper[4938]: I1122 11:11:12.744384 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a66a985f-423c-4438-b9cc-ad5cbc582077-kube-api-access-kfj9f" (OuterVolumeSpecName: "kube-api-access-kfj9f") pod "a66a985f-423c-4438-b9cc-ad5cbc582077" (UID: "a66a985f-423c-4438-b9cc-ad5cbc582077"). InnerVolumeSpecName "kube-api-access-kfj9f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:11:12 crc kubenswrapper[4938]: I1122 11:11:12.778197 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a66a985f-423c-4438-b9cc-ad5cbc582077-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "a66a985f-423c-4438-b9cc-ad5cbc582077" (UID: "a66a985f-423c-4438-b9cc-ad5cbc582077"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:11:12 crc kubenswrapper[4938]: I1122 11:11:12.785203 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a66a985f-423c-4438-b9cc-ad5cbc582077-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "a66a985f-423c-4438-b9cc-ad5cbc582077" (UID: "a66a985f-423c-4438-b9cc-ad5cbc582077"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:11:12 crc kubenswrapper[4938]: I1122 11:11:12.838111 4938 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/a66a985f-423c-4438-b9cc-ad5cbc582077-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:12 crc kubenswrapper[4938]: I1122 11:11:12.838147 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a66a985f-423c-4438-b9cc-ad5cbc582077-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:12 crc kubenswrapper[4938]: I1122 11:11:12.838159 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfj9f\" (UniqueName: \"kubernetes.io/projected/a66a985f-423c-4438-b9cc-ad5cbc582077-kube-api-access-kfj9f\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.210455 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-9895f" event={"ID":"a66a985f-423c-4438-b9cc-ad5cbc582077","Type":"ContainerDied","Data":"3f0c3d2866853f6657ac08ef79d0b022d2028e4912c51797a69c5e3deefc97f0"} Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.210500 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f0c3d2866853f6657ac08ef79d0b022d2028e4912c51797a69c5e3deefc97f0" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.210537 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-9895f" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.299269 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th"] Nov 22 11:11:13 crc kubenswrapper[4938]: E1122 11:11:13.299832 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a66a985f-423c-4438-b9cc-ad5cbc582077" containerName="ssh-known-hosts-edpm-deployment" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.299896 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="a66a985f-423c-4438-b9cc-ad5cbc582077" containerName="ssh-known-hosts-edpm-deployment" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.300189 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="a66a985f-423c-4438-b9cc-ad5cbc582077" containerName="ssh-known-hosts-edpm-deployment" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.300966 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.302733 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.303279 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.303644 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.303884 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.313116 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th"] Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.447508 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/146c45e8-f683-48dd-99b4-02d5eeab729d-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s94th\" (UID: \"146c45e8-f683-48dd-99b4-02d5eeab729d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.447878 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146c45e8-f683-48dd-99b4-02d5eeab729d-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s94th\" (UID: \"146c45e8-f683-48dd-99b4-02d5eeab729d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.448277 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pshkg\" (UniqueName: \"kubernetes.io/projected/146c45e8-f683-48dd-99b4-02d5eeab729d-kube-api-access-pshkg\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s94th\" (UID: \"146c45e8-f683-48dd-99b4-02d5eeab729d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.549817 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pshkg\" (UniqueName: \"kubernetes.io/projected/146c45e8-f683-48dd-99b4-02d5eeab729d-kube-api-access-pshkg\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s94th\" (UID: \"146c45e8-f683-48dd-99b4-02d5eeab729d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.549892 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/146c45e8-f683-48dd-99b4-02d5eeab729d-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s94th\" (UID: \"146c45e8-f683-48dd-99b4-02d5eeab729d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.549959 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146c45e8-f683-48dd-99b4-02d5eeab729d-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s94th\" (UID: \"146c45e8-f683-48dd-99b4-02d5eeab729d\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.555441 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/146c45e8-f683-48dd-99b4-02d5eeab729d-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s94th\" (UID: \"146c45e8-f683-48dd-99b4-02d5eeab729d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.558559 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146c45e8-f683-48dd-99b4-02d5eeab729d-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s94th\" (UID: \"146c45e8-f683-48dd-99b4-02d5eeab729d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.565623 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pshkg\" (UniqueName: \"kubernetes.io/projected/146c45e8-f683-48dd-99b4-02d5eeab729d-kube-api-access-pshkg\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-s94th\" (UID: \"146c45e8-f683-48dd-99b4-02d5eeab729d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:13 crc kubenswrapper[4938]: I1122 11:11:13.619127 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:14 crc kubenswrapper[4938]: I1122 11:11:14.144200 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th"] Nov 22 11:11:14 crc kubenswrapper[4938]: W1122 11:11:14.148321 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod146c45e8_f683_48dd_99b4_02d5eeab729d.slice/crio-074b95ff3b60ebe02d3c4d8d474925e4bbd4118d15c782b7bd935bc877bd1fa9 WatchSource:0}: Error finding container 074b95ff3b60ebe02d3c4d8d474925e4bbd4118d15c782b7bd935bc877bd1fa9: Status 404 returned error can't find the container with id 074b95ff3b60ebe02d3c4d8d474925e4bbd4118d15c782b7bd935bc877bd1fa9 Nov 22 11:11:14 crc kubenswrapper[4938]: I1122 11:11:14.217935 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" event={"ID":"146c45e8-f683-48dd-99b4-02d5eeab729d","Type":"ContainerStarted","Data":"074b95ff3b60ebe02d3c4d8d474925e4bbd4118d15c782b7bd935bc877bd1fa9"} Nov 22 11:11:15 crc kubenswrapper[4938]: I1122 11:11:15.038998 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-f4dlk"] Nov 22 11:11:15 crc kubenswrapper[4938]: I1122 11:11:15.046751 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-f4dlk"] Nov 22 11:11:15 crc kubenswrapper[4938]: I1122 11:11:15.227429 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" event={"ID":"146c45e8-f683-48dd-99b4-02d5eeab729d","Type":"ContainerStarted","Data":"6f6e04e84e8c06a030be4fb6d8e148fca688e4b4e4c9fcf5328c1f853395e31f"} Nov 22 11:11:15 crc kubenswrapper[4938]: I1122 11:11:15.241187 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" podStartSLOduration=1.8386507220000001 podStartE2EDuration="2.241166103s" podCreationTimestamp="2025-11-22 
11:11:13 +0000 UTC" firstStartedPulling="2025-11-22 11:11:14.15138583 +0000 UTC m=+2006.619223229" lastFinishedPulling="2025-11-22 11:11:14.553901211 +0000 UTC m=+2007.021738610" observedRunningTime="2025-11-22 11:11:15.239709787 +0000 UTC m=+2007.707547196" watchObservedRunningTime="2025-11-22 11:11:15.241166103 +0000 UTC m=+2007.709003502" Nov 22 11:11:16 crc kubenswrapper[4938]: I1122 11:11:16.457882 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6b7624c-96ce-45d5-bc85-4549bc1c0988" path="/var/lib/kubelet/pods/a6b7624c-96ce-45d5-bc85-4549bc1c0988/volumes" Nov 22 11:11:22 crc kubenswrapper[4938]: I1122 11:11:22.285138 4938 generic.go:334] "Generic (PLEG): container finished" podID="146c45e8-f683-48dd-99b4-02d5eeab729d" containerID="6f6e04e84e8c06a030be4fb6d8e148fca688e4b4e4c9fcf5328c1f853395e31f" exitCode=0 Nov 22 11:11:22 crc kubenswrapper[4938]: I1122 11:11:22.285220 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" event={"ID":"146c45e8-f683-48dd-99b4-02d5eeab729d","Type":"ContainerDied","Data":"6f6e04e84e8c06a030be4fb6d8e148fca688e4b4e4c9fcf5328c1f853395e31f"} Nov 22 11:11:23 crc kubenswrapper[4938]: I1122 11:11:23.670341 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:23 crc kubenswrapper[4938]: I1122 11:11:23.752634 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/146c45e8-f683-48dd-99b4-02d5eeab729d-ssh-key\") pod \"146c45e8-f683-48dd-99b4-02d5eeab729d\" (UID: \"146c45e8-f683-48dd-99b4-02d5eeab729d\") " Nov 22 11:11:23 crc kubenswrapper[4938]: I1122 11:11:23.778841 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/146c45e8-f683-48dd-99b4-02d5eeab729d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "146c45e8-f683-48dd-99b4-02d5eeab729d" (UID: "146c45e8-f683-48dd-99b4-02d5eeab729d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:11:23 crc kubenswrapper[4938]: I1122 11:11:23.854807 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146c45e8-f683-48dd-99b4-02d5eeab729d-inventory\") pod \"146c45e8-f683-48dd-99b4-02d5eeab729d\" (UID: \"146c45e8-f683-48dd-99b4-02d5eeab729d\") " Nov 22 11:11:23 crc kubenswrapper[4938]: I1122 11:11:23.855311 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pshkg\" (UniqueName: \"kubernetes.io/projected/146c45e8-f683-48dd-99b4-02d5eeab729d-kube-api-access-pshkg\") pod \"146c45e8-f683-48dd-99b4-02d5eeab729d\" (UID: \"146c45e8-f683-48dd-99b4-02d5eeab729d\") " Nov 22 11:11:23 crc kubenswrapper[4938]: I1122 11:11:23.855896 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/146c45e8-f683-48dd-99b4-02d5eeab729d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:23 crc kubenswrapper[4938]: I1122 11:11:23.858518 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/146c45e8-f683-48dd-99b4-02d5eeab729d-kube-api-access-pshkg" (OuterVolumeSpecName: "kube-api-access-pshkg") pod "146c45e8-f683-48dd-99b4-02d5eeab729d" (UID: "146c45e8-f683-48dd-99b4-02d5eeab729d"). InnerVolumeSpecName "kube-api-access-pshkg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:11:23 crc kubenswrapper[4938]: I1122 11:11:23.878313 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/146c45e8-f683-48dd-99b4-02d5eeab729d-inventory" (OuterVolumeSpecName: "inventory") pod "146c45e8-f683-48dd-99b4-02d5eeab729d" (UID: "146c45e8-f683-48dd-99b4-02d5eeab729d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:11:23 crc kubenswrapper[4938]: I1122 11:11:23.958248 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/146c45e8-f683-48dd-99b4-02d5eeab729d-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:23 crc kubenswrapper[4938]: I1122 11:11:23.958328 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pshkg\" (UniqueName: \"kubernetes.io/projected/146c45e8-f683-48dd-99b4-02d5eeab729d-kube-api-access-pshkg\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.311466 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" event={"ID":"146c45e8-f683-48dd-99b4-02d5eeab729d","Type":"ContainerDied","Data":"074b95ff3b60ebe02d3c4d8d474925e4bbd4118d15c782b7bd935bc877bd1fa9"} Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.311506 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="074b95ff3b60ebe02d3c4d8d474925e4bbd4118d15c782b7bd935bc877bd1fa9" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.311557 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-s94th" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.382528 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72"] Nov 22 11:11:24 crc kubenswrapper[4938]: E1122 11:11:24.383223 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="146c45e8-f683-48dd-99b4-02d5eeab729d" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.383317 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="146c45e8-f683-48dd-99b4-02d5eeab729d" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.383556 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="146c45e8-f683-48dd-99b4-02d5eeab729d" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.384227 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.386641 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.387138 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.387391 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.388605 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.392365 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72"] Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.567406 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72\" (UID: \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.567505 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbcs7\" (UniqueName: \"kubernetes.io/projected/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-kube-api-access-dbcs7\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72\" (UID: \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.567666 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72\" (UID: \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.669639 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbcs7\" (UniqueName: \"kubernetes.io/projected/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-kube-api-access-dbcs7\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72\" (UID: \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.669817 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72\" (UID: \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.669871 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72\" (UID: 
\"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.675029 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72\" (UID: \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.675062 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72\" (UID: \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.687313 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbcs7\" (UniqueName: \"kubernetes.io/projected/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-kube-api-access-dbcs7\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72\" (UID: \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:24 crc kubenswrapper[4938]: I1122 11:11:24.718494 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:25 crc kubenswrapper[4938]: I1122 11:11:25.225550 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72"] Nov 22 11:11:25 crc kubenswrapper[4938]: I1122 11:11:25.234931 4938 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:11:25 crc kubenswrapper[4938]: I1122 11:11:25.319805 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" event={"ID":"82ac5576-d4c3-4bb0-a2f3-2f6da7605821","Type":"ContainerStarted","Data":"9aa9183302fa568809325bc310732c91a423e5e19ee0d2d048b60ce080e04471"} Nov 22 11:11:25 crc kubenswrapper[4938]: I1122 11:11:25.793951 4938 scope.go:117] "RemoveContainer" containerID="5dca54e774d1c8690cd3878e26ee81de2824078f8e56e21bfc120572170c418c" Nov 22 11:11:25 crc kubenswrapper[4938]: I1122 11:11:25.869536 4938 scope.go:117] "RemoveContainer" containerID="9d3ed1a8030d80d9b4c97afdba43174ad2e28b921be9a2d0b4fb46a9e449d213" Nov 22 11:11:25 crc kubenswrapper[4938]: I1122 11:11:25.932247 4938 scope.go:117] "RemoveContainer" containerID="41c3eb3196d36b9546195316bf62b34db3524528d0497fe215cdf5f66211c445" Nov 22 11:11:26 crc kubenswrapper[4938]: I1122 11:11:26.330171 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" event={"ID":"82ac5576-d4c3-4bb0-a2f3-2f6da7605821","Type":"ContainerStarted","Data":"4fd958ad53f457c33f8c9ca873dd385ec92b290286731f6092cf5304100455e7"} Nov 22 11:11:26 crc kubenswrapper[4938]: I1122 11:11:26.352648 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" podStartSLOduration=1.8826471919999999 podStartE2EDuration="2.352628063s" podCreationTimestamp="2025-11-22 11:11:24 +0000 UTC" firstStartedPulling="2025-11-22 11:11:25.234733956 +0000 UTC 
m=+2017.702571355" lastFinishedPulling="2025-11-22 11:11:25.704714827 +0000 UTC m=+2018.172552226" observedRunningTime="2025-11-22 11:11:26.344818997 +0000 UTC m=+2018.812656396" watchObservedRunningTime="2025-11-22 11:11:26.352628063 +0000 UTC m=+2018.820465462" Nov 22 11:11:35 crc kubenswrapper[4938]: I1122 11:11:35.414330 4938 generic.go:334] "Generic (PLEG): container finished" podID="82ac5576-d4c3-4bb0-a2f3-2f6da7605821" containerID="4fd958ad53f457c33f8c9ca873dd385ec92b290286731f6092cf5304100455e7" exitCode=0 Nov 22 11:11:35 crc kubenswrapper[4938]: I1122 11:11:35.414432 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" event={"ID":"82ac5576-d4c3-4bb0-a2f3-2f6da7605821","Type":"ContainerDied","Data":"4fd958ad53f457c33f8c9ca873dd385ec92b290286731f6092cf5304100455e7"} Nov 22 11:11:36 crc kubenswrapper[4938]: I1122 11:11:36.867772 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.028652 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbcs7\" (UniqueName: \"kubernetes.io/projected/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-kube-api-access-dbcs7\") pod \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\" (UID: \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\") " Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.028709 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-ssh-key\") pod \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\" (UID: \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\") " Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.028949 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-inventory\") pod \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\" (UID: \"82ac5576-d4c3-4bb0-a2f3-2f6da7605821\") " Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.034748 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-kube-api-access-dbcs7" (OuterVolumeSpecName: "kube-api-access-dbcs7") pod "82ac5576-d4c3-4bb0-a2f3-2f6da7605821" (UID: "82ac5576-d4c3-4bb0-a2f3-2f6da7605821"). InnerVolumeSpecName "kube-api-access-dbcs7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.056560 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-inventory" (OuterVolumeSpecName: "inventory") pod "82ac5576-d4c3-4bb0-a2f3-2f6da7605821" (UID: "82ac5576-d4c3-4bb0-a2f3-2f6da7605821"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.065889 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "82ac5576-d4c3-4bb0-a2f3-2f6da7605821" (UID: "82ac5576-d4c3-4bb0-a2f3-2f6da7605821"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.131028 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbcs7\" (UniqueName: \"kubernetes.io/projected/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-kube-api-access-dbcs7\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.131061 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.131070 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82ac5576-d4c3-4bb0-a2f3-2f6da7605821-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.437716 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" event={"ID":"82ac5576-d4c3-4bb0-a2f3-2f6da7605821","Type":"ContainerDied","Data":"9aa9183302fa568809325bc310732c91a423e5e19ee0d2d048b60ce080e04471"} Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.438084 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9aa9183302fa568809325bc310732c91a423e5e19ee0d2d048b60ce080e04471" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.437779 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.540512 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2"] Nov 22 11:11:37 crc kubenswrapper[4938]: E1122 11:11:37.541038 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82ac5576-d4c3-4bb0-a2f3-2f6da7605821" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.541071 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="82ac5576-d4c3-4bb0-a2f3-2f6da7605821" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.541391 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="82ac5576-d4c3-4bb0-a2f3-2f6da7605821" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.542310 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.545626 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.545812 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.546595 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.546726 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.546857 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.546896 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.547212 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.547768 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.567532 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2"] Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.640831 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.640983 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.641039 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.641156 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: 
\"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.641247 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp9hr\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-kube-api-access-qp9hr\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.641295 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.641367 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.641407 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.641473 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.641530 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.641591 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.641678 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.641767 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.641859 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744173 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744267 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744323 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744350 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744377 4938 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744405 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744438 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744475 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp9hr\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-kube-api-access-qp9hr\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744514 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744555 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744583 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744631 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744669 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.744700 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.749319 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.750553 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.750747 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.750978 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.751109 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.751158 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" 
(UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.751328 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.751802 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.752223 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.752519 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.753038 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.753613 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.755790 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.763009 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp9hr\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-kube-api-access-qp9hr\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-rthm2\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:37 crc kubenswrapper[4938]: I1122 11:11:37.859051 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:11:39 crc kubenswrapper[4938]: I1122 11:11:39.433433 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2"] Nov 22 11:11:39 crc kubenswrapper[4938]: I1122 11:11:39.457787 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" event={"ID":"71e3a6a6-d91c-416c-9ec6-43429dd10097","Type":"ContainerStarted","Data":"9797b601fa022cd456863c31132dcb8334ac806e0c91a4828828edd1e2ac8dff"} Nov 22 11:11:40 crc kubenswrapper[4938]: I1122 11:11:40.467100 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" event={"ID":"71e3a6a6-d91c-416c-9ec6-43429dd10097","Type":"ContainerStarted","Data":"d04f56e307acece28cf7e266679ab5296786f7270753cac86a419a1f8b7293c6"} Nov 22 11:11:40 crc kubenswrapper[4938]: I1122 11:11:40.500848 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" podStartSLOduration=2.828233023 podStartE2EDuration="3.500824798s" podCreationTimestamp="2025-11-22 11:11:37 +0000 UTC" firstStartedPulling="2025-11-22 11:11:39.445540299 +0000 UTC m=+2031.913377698" lastFinishedPulling="2025-11-22 11:11:40.118132074 +0000 UTC m=+2032.585969473" observedRunningTime="2025-11-22 11:11:40.491832233 +0000 UTC m=+2032.959669632" watchObservedRunningTime="2025-11-22 11:11:40.500824798 +0000 UTC m=+2032.968662207" Nov 22 11:11:41 crc kubenswrapper[4938]: I1122 11:11:41.301132 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:11:41 crc kubenswrapper[4938]: I1122 11:11:41.301226 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:12:11 crc kubenswrapper[4938]: I1122 11:12:11.301281 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:12:11 crc kubenswrapper[4938]: I1122 11:12:11.301879 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" 
podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:12:11 crc kubenswrapper[4938]: I1122 11:12:11.302021 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 11:12:11 crc kubenswrapper[4938]: I1122 11:12:11.302800 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"354b7cee0faf75c6141560f2e8431b8d74b396591ebde1b2d4ea604134123035"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:12:11 crc kubenswrapper[4938]: I1122 11:12:11.302853 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://354b7cee0faf75c6141560f2e8431b8d74b396591ebde1b2d4ea604134123035" gracePeriod=600 Nov 22 11:12:11 crc kubenswrapper[4938]: I1122 11:12:11.758458 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="354b7cee0faf75c6141560f2e8431b8d74b396591ebde1b2d4ea604134123035" exitCode=0 Nov 22 11:12:11 crc kubenswrapper[4938]: I1122 11:12:11.758543 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"354b7cee0faf75c6141560f2e8431b8d74b396591ebde1b2d4ea604134123035"} Nov 22 11:12:11 crc kubenswrapper[4938]: I1122 11:12:11.758774 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1"} Nov 22 11:12:11 crc kubenswrapper[4938]: I1122 11:12:11.758796 4938 scope.go:117] "RemoveContainer" containerID="e185e59237bd54f39780599fda4ede57409b6341f47c29b6155c80faefcdb86c" Nov 22 11:12:16 crc kubenswrapper[4938]: I1122 11:12:16.808626 4938 generic.go:334] "Generic (PLEG): container finished" podID="71e3a6a6-d91c-416c-9ec6-43429dd10097" containerID="d04f56e307acece28cf7e266679ab5296786f7270753cac86a419a1f8b7293c6" exitCode=0 Nov 22 11:12:16 crc kubenswrapper[4938]: I1122 11:12:16.808717 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" event={"ID":"71e3a6a6-d91c-416c-9ec6-43429dd10097","Type":"ContainerDied","Data":"d04f56e307acece28cf7e266679ab5296786f7270753cac86a419a1f8b7293c6"} Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.197789 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.374687 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-repo-setup-combined-ca-bundle\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.375030 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qp9hr\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-kube-api-access-qp9hr\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.375133 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-telemetry-combined-ca-bundle\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.375252 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.375377 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-inventory\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.375551 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-ovn-combined-ca-bundle\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.376117 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-ssh-key\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.376314 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-bootstrap-combined-ca-bundle\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.376436 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-nova-combined-ca-bundle\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.376563 4938 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.376694 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-neutron-metadata-combined-ca-bundle\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.376805 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-ovn-default-certs-0\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.377311 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.377419 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-libvirt-combined-ca-bundle\") pod \"71e3a6a6-d91c-416c-9ec6-43429dd10097\" (UID: \"71e3a6a6-d91c-416c-9ec6-43429dd10097\") " Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.381725 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-kube-api-access-qp9hr" (OuterVolumeSpecName: "kube-api-access-qp9hr") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "kube-api-access-qp9hr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.382582 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.382812 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.382828 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.383111 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.383614 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.384425 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.385523 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.385882 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.386576 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.388055 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.404370 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.410371 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.412110 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-inventory" (OuterVolumeSpecName: "inventory") pod "71e3a6a6-d91c-416c-9ec6-43429dd10097" (UID: "71e3a6a6-d91c-416c-9ec6-43429dd10097"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.479997 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480031 4938 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480045 4938 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480055 4938 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480065 4938 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480076 4938 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480085 4938 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480096 4938 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480106 4938 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480114 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qp9hr\" (UniqueName: \"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-kube-api-access-qp9hr\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480123 4938 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480131 4938 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/71e3a6a6-d91c-416c-9ec6-43429dd10097-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480139 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.480149 4938 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71e3a6a6-d91c-416c-9ec6-43429dd10097-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.830750 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" event={"ID":"71e3a6a6-d91c-416c-9ec6-43429dd10097","Type":"ContainerDied","Data":"9797b601fa022cd456863c31132dcb8334ac806e0c91a4828828edd1e2ac8dff"} Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.830795 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9797b601fa022cd456863c31132dcb8334ac806e0c91a4828828edd1e2ac8dff" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.830870 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-rthm2" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.967182 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr"] Nov 22 11:12:18 crc kubenswrapper[4938]: E1122 11:12:18.967925 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71e3a6a6-d91c-416c-9ec6-43429dd10097" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.967943 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="71e3a6a6-d91c-416c-9ec6-43429dd10097" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.968360 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="71e3a6a6-d91c-416c-9ec6-43429dd10097" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.969357 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.975542 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.975596 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.975974 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.976153 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.980026 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:12:18 crc kubenswrapper[4938]: I1122 11:12:18.995038 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr"] Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.092515 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfm5w\" (UniqueName: \"kubernetes.io/projected/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-kube-api-access-qfm5w\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.092662 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.092735 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.092907 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.092998 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.194972 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.195510 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.195548 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.195617 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfm5w\" (UniqueName: \"kubernetes.io/projected/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-kube-api-access-qfm5w\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.195691 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.197124 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.202863 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.203212 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.204139 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.225532 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfm5w\" (UniqueName: \"kubernetes.io/projected/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-kube-api-access-qfm5w\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wn6rr\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.308996 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:12:19 crc kubenswrapper[4938]: I1122 11:12:19.896333 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr"] Nov 22 11:12:20 crc kubenswrapper[4938]: I1122 11:12:20.848102 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" event={"ID":"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec","Type":"ContainerStarted","Data":"87de3e248ca73014242fdee91ece4d88063c3982d2b6a336ae76a9b8fa909069"} Nov 22 11:12:20 crc kubenswrapper[4938]: I1122 11:12:20.848779 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" event={"ID":"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec","Type":"ContainerStarted","Data":"eaf591ef943f41495f5acfb296c274d377733bde4128bf5d6118b36c0f985c95"} Nov 22 11:12:20 crc kubenswrapper[4938]: I1122 11:12:20.866256 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" podStartSLOduration=2.423565144 podStartE2EDuration="2.866238561s" podCreationTimestamp="2025-11-22 11:12:18 +0000 UTC" firstStartedPulling="2025-11-22 11:12:19.910096044 +0000 UTC m=+2072.377933443" lastFinishedPulling="2025-11-22 11:12:20.352769461 +0000 UTC m=+2072.820606860" observedRunningTime="2025-11-22 11:12:20.865085992 +0000 UTC m=+2073.332923391" watchObservedRunningTime="2025-11-22 11:12:20.866238561 +0000 UTC m=+2073.334075960" Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.430953 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9fddp"] Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.444299 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9fddp"] Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.444746 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.515301 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fjrt\" (UniqueName: \"kubernetes.io/projected/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-kube-api-access-7fjrt\") pod \"redhat-operators-9fddp\" (UID: \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\") " pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.516135 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-catalog-content\") pod \"redhat-operators-9fddp\" (UID: \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\") " pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.516450 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-utilities\") pod \"redhat-operators-9fddp\" (UID: \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\") " pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.618163 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-utilities\") pod \"redhat-operators-9fddp\" (UID: \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\") " pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.618331 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fjrt\" (UniqueName: \"kubernetes.io/projected/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-kube-api-access-7fjrt\") pod \"redhat-operators-9fddp\" (UID: \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\") " pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.618397 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-catalog-content\") pod \"redhat-operators-9fddp\" (UID: \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\") " pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.618954 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-catalog-content\") pod \"redhat-operators-9fddp\" (UID: \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\") " pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.619221 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-utilities\") pod \"redhat-operators-9fddp\" (UID: \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\") " pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.638513 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fjrt\" (UniqueName: \"kubernetes.io/projected/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-kube-api-access-7fjrt\") pod \"redhat-operators-9fddp\" (UID: 
\"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\") " pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:06 crc kubenswrapper[4938]: I1122 11:13:06.791440 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:07 crc kubenswrapper[4938]: I1122 11:13:07.274671 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9fddp"] Nov 22 11:13:08 crc kubenswrapper[4938]: I1122 11:13:08.274752 4938 generic.go:334] "Generic (PLEG): container finished" podID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" containerID="85b90a09e2baca1ad1d9828b4c2e1c9b2b6d0d42652481d571093eddd024a8ac" exitCode=0 Nov 22 11:13:08 crc kubenswrapper[4938]: I1122 11:13:08.274803 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9fddp" event={"ID":"9c1ec982-1ecd-4341-9552-44f2b3da4f3a","Type":"ContainerDied","Data":"85b90a09e2baca1ad1d9828b4c2e1c9b2b6d0d42652481d571093eddd024a8ac"} Nov 22 11:13:08 crc kubenswrapper[4938]: I1122 11:13:08.275157 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9fddp" event={"ID":"9c1ec982-1ecd-4341-9552-44f2b3da4f3a","Type":"ContainerStarted","Data":"4b819d1980dd3c87d6c152c4897ef5a6fe2d7a4b156d79918f70fcefdeb1c8c9"} Nov 22 11:13:09 crc kubenswrapper[4938]: I1122 11:13:09.284727 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9fddp" event={"ID":"9c1ec982-1ecd-4341-9552-44f2b3da4f3a","Type":"ContainerStarted","Data":"2ae98645c6065995219b1cd3ef00c932ec4ce4198b6c6257a083384c8da752af"} Nov 22 11:13:10 crc kubenswrapper[4938]: I1122 11:13:10.294686 4938 generic.go:334] "Generic (PLEG): container finished" podID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" containerID="2ae98645c6065995219b1cd3ef00c932ec4ce4198b6c6257a083384c8da752af" exitCode=0 Nov 22 11:13:10 crc kubenswrapper[4938]: I1122 11:13:10.294760 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9fddp" event={"ID":"9c1ec982-1ecd-4341-9552-44f2b3da4f3a","Type":"ContainerDied","Data":"2ae98645c6065995219b1cd3ef00c932ec4ce4198b6c6257a083384c8da752af"} Nov 22 11:13:12 crc kubenswrapper[4938]: I1122 11:13:12.312581 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9fddp" event={"ID":"9c1ec982-1ecd-4341-9552-44f2b3da4f3a","Type":"ContainerStarted","Data":"4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c"} Nov 22 11:13:12 crc kubenswrapper[4938]: I1122 11:13:12.337271 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9fddp" podStartSLOduration=3.892502576 podStartE2EDuration="6.337249115s" podCreationTimestamp="2025-11-22 11:13:06 +0000 UTC" firstStartedPulling="2025-11-22 11:13:08.277731494 +0000 UTC m=+2120.745568893" lastFinishedPulling="2025-11-22 11:13:10.722478033 +0000 UTC m=+2123.190315432" observedRunningTime="2025-11-22 11:13:12.330030055 +0000 UTC m=+2124.797867474" watchObservedRunningTime="2025-11-22 11:13:12.337249115 +0000 UTC m=+2124.805086514" Nov 22 11:13:16 crc kubenswrapper[4938]: I1122 11:13:16.791636 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:16 crc kubenswrapper[4938]: I1122 11:13:16.792452 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:17 crc kubenswrapper[4938]: I1122 11:13:17.850894 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9fddp" podUID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" containerName="registry-server" probeResult="failure" output=< Nov 22 11:13:17 crc kubenswrapper[4938]: timeout: failed to connect service ":50051" within 1s Nov 22 11:13:17 crc kubenswrapper[4938]: > Nov 22 11:13:19 crc kubenswrapper[4938]: I1122 11:13:19.373427 4938 generic.go:334] "Generic (PLEG): container finished" podID="0c6fb3fe-7488-44ba-a5fc-d24f04f40dec" containerID="87de3e248ca73014242fdee91ece4d88063c3982d2b6a336ae76a9b8fa909069" exitCode=0 Nov 22 11:13:19 crc kubenswrapper[4938]: I1122 11:13:19.373539 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" event={"ID":"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec","Type":"ContainerDied","Data":"87de3e248ca73014242fdee91ece4d88063c3982d2b6a336ae76a9b8fa909069"} Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.783483 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.892740 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfm5w\" (UniqueName: \"kubernetes.io/projected/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-kube-api-access-qfm5w\") pod \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.892789 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ovn-combined-ca-bundle\") pod \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.892936 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ovncontroller-config-0\") pod \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.893019 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ssh-key\") pod \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.893095 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-inventory\") pod \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\" (UID: \"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec\") " Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.898673 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "0c6fb3fe-7488-44ba-a5fc-d24f04f40dec" (UID: "0c6fb3fe-7488-44ba-a5fc-d24f04f40dec"). InnerVolumeSpecName "ovn-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.899133 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-kube-api-access-qfm5w" (OuterVolumeSpecName: "kube-api-access-qfm5w") pod "0c6fb3fe-7488-44ba-a5fc-d24f04f40dec" (UID: "0c6fb3fe-7488-44ba-a5fc-d24f04f40dec"). InnerVolumeSpecName "kube-api-access-qfm5w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.920222 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "0c6fb3fe-7488-44ba-a5fc-d24f04f40dec" (UID: "0c6fb3fe-7488-44ba-a5fc-d24f04f40dec"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.921610 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0c6fb3fe-7488-44ba-a5fc-d24f04f40dec" (UID: "0c6fb3fe-7488-44ba-a5fc-d24f04f40dec"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.922092 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-inventory" (OuterVolumeSpecName: "inventory") pod "0c6fb3fe-7488-44ba-a5fc-d24f04f40dec" (UID: "0c6fb3fe-7488-44ba-a5fc-d24f04f40dec"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.995009 4938 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.995053 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.995068 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.995086 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfm5w\" (UniqueName: \"kubernetes.io/projected/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-kube-api-access-qfm5w\") on node \"crc\" DevicePath \"\"" Nov 22 11:13:20 crc kubenswrapper[4938]: I1122 11:13:20.995099 4938 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c6fb3fe-7488-44ba-a5fc-d24f04f40dec-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.394545 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" event={"ID":"0c6fb3fe-7488-44ba-a5fc-d24f04f40dec","Type":"ContainerDied","Data":"eaf591ef943f41495f5acfb296c274d377733bde4128bf5d6118b36c0f985c95"} Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 
11:13:21.394586 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eaf591ef943f41495f5acfb296c274d377733bde4128bf5d6118b36c0f985c95" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.394596 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wn6rr" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.495786 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9"] Nov 22 11:13:21 crc kubenswrapper[4938]: E1122 11:13:21.496355 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c6fb3fe-7488-44ba-a5fc-d24f04f40dec" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.496372 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c6fb3fe-7488-44ba-a5fc-d24f04f40dec" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.496656 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c6fb3fe-7488-44ba-a5fc-d24f04f40dec" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.497437 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.500115 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.500422 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.501093 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.503509 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.503711 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.503872 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.508771 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9"] Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.604782 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.604896 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-neutron-ovn-metadata-agent-neutron-config-0\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.604940 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.605021 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.605496 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.605620 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tff2d\" (UniqueName: \"kubernetes.io/projected/2dc63cc5-838b-4bdf-86fe-46ede44788b3-kube-api-access-tff2d\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.708310 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.708390 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tff2d\" (UniqueName: \"kubernetes.io/projected/2dc63cc5-838b-4bdf-86fe-46ede44788b3-kube-api-access-tff2d\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.708465 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 
11:13:21.708537 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.708567 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.708636 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.712392 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.712457 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.713234 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.714253 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.722162 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" 
(UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.727132 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tff2d\" (UniqueName: \"kubernetes.io/projected/2dc63cc5-838b-4bdf-86fe-46ede44788b3-kube-api-access-tff2d\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:21 crc kubenswrapper[4938]: I1122 11:13:21.822297 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:13:22 crc kubenswrapper[4938]: I1122 11:13:22.341782 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9"] Nov 22 11:13:22 crc kubenswrapper[4938]: I1122 11:13:22.410088 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" event={"ID":"2dc63cc5-838b-4bdf-86fe-46ede44788b3","Type":"ContainerStarted","Data":"ec6db715e23e543402a5a8bd80695395a7fe36e8f62718f27ad4dee701b4ecd0"} Nov 22 11:13:23 crc kubenswrapper[4938]: I1122 11:13:23.422354 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" event={"ID":"2dc63cc5-838b-4bdf-86fe-46ede44788b3","Type":"ContainerStarted","Data":"c9edbb167d1147a3a4ea67af5e09bd7d15ec6c34c1b1e937ca082de0170db6b6"} Nov 22 11:13:23 crc kubenswrapper[4938]: I1122 11:13:23.450586 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" podStartSLOduration=1.963764969 podStartE2EDuration="2.450557091s" podCreationTimestamp="2025-11-22 11:13:21 +0000 UTC" firstStartedPulling="2025-11-22 11:13:22.350447258 +0000 UTC m=+2134.818284657" lastFinishedPulling="2025-11-22 11:13:22.83723937 +0000 UTC m=+2135.305076779" observedRunningTime="2025-11-22 11:13:23.441176756 +0000 UTC m=+2135.909014175" watchObservedRunningTime="2025-11-22 11:13:23.450557091 +0000 UTC m=+2135.918394490" Nov 22 11:13:26 crc kubenswrapper[4938]: I1122 11:13:26.839001 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:26 crc kubenswrapper[4938]: I1122 11:13:26.894000 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:27 crc kubenswrapper[4938]: I1122 11:13:27.083340 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9fddp"] Nov 22 11:13:28 crc kubenswrapper[4938]: I1122 11:13:28.464376 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9fddp" podUID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" containerName="registry-server" containerID="cri-o://4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c" gracePeriod=2 Nov 22 11:13:28 crc kubenswrapper[4938]: I1122 11:13:28.995511 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.080732 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-catalog-content\") pod \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\" (UID: \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\") " Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.080785 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fjrt\" (UniqueName: \"kubernetes.io/projected/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-kube-api-access-7fjrt\") pod \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\" (UID: \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\") " Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.080882 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-utilities\") pod \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\" (UID: \"9c1ec982-1ecd-4341-9552-44f2b3da4f3a\") " Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.082165 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-utilities" (OuterVolumeSpecName: "utilities") pod "9c1ec982-1ecd-4341-9552-44f2b3da4f3a" (UID: "9c1ec982-1ecd-4341-9552-44f2b3da4f3a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.087149 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-kube-api-access-7fjrt" (OuterVolumeSpecName: "kube-api-access-7fjrt") pod "9c1ec982-1ecd-4341-9552-44f2b3da4f3a" (UID: "9c1ec982-1ecd-4341-9552-44f2b3da4f3a"). InnerVolumeSpecName "kube-api-access-7fjrt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.181724 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9c1ec982-1ecd-4341-9552-44f2b3da4f3a" (UID: "9c1ec982-1ecd-4341-9552-44f2b3da4f3a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.183644 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.183683 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fjrt\" (UniqueName: \"kubernetes.io/projected/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-kube-api-access-7fjrt\") on node \"crc\" DevicePath \"\"" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.183698 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c1ec982-1ecd-4341-9552-44f2b3da4f3a-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.473581 4938 generic.go:334] "Generic (PLEG): container finished" podID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" containerID="4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c" exitCode=0 Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.473629 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9fddp" event={"ID":"9c1ec982-1ecd-4341-9552-44f2b3da4f3a","Type":"ContainerDied","Data":"4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c"} Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.473659 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9fddp" event={"ID":"9c1ec982-1ecd-4341-9552-44f2b3da4f3a","Type":"ContainerDied","Data":"4b819d1980dd3c87d6c152c4897ef5a6fe2d7a4b156d79918f70fcefdeb1c8c9"} Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.473691 4938 scope.go:117] "RemoveContainer" containerID="4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.475000 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9fddp" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.511254 4938 scope.go:117] "RemoveContainer" containerID="2ae98645c6065995219b1cd3ef00c932ec4ce4198b6c6257a083384c8da752af" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.514462 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9fddp"] Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.524538 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9fddp"] Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.533622 4938 scope.go:117] "RemoveContainer" containerID="85b90a09e2baca1ad1d9828b4c2e1c9b2b6d0d42652481d571093eddd024a8ac" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.599845 4938 scope.go:117] "RemoveContainer" containerID="4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c" Nov 22 11:13:29 crc kubenswrapper[4938]: E1122 11:13:29.600233 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c\": container with ID starting with 4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c not found: ID does not exist" containerID="4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.600310 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c"} err="failed to get container status \"4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c\": rpc error: code = NotFound desc = could not find container \"4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c\": container with ID starting with 4517a58e00eeb27f94e32791a862b5a25f94cb2100e4a61e6abe22600c53984c not found: ID does not exist" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.600355 4938 scope.go:117] "RemoveContainer" containerID="2ae98645c6065995219b1cd3ef00c932ec4ce4198b6c6257a083384c8da752af" Nov 22 11:13:29 crc kubenswrapper[4938]: E1122 11:13:29.600819 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ae98645c6065995219b1cd3ef00c932ec4ce4198b6c6257a083384c8da752af\": container with ID starting with 2ae98645c6065995219b1cd3ef00c932ec4ce4198b6c6257a083384c8da752af not found: ID does not exist" containerID="2ae98645c6065995219b1cd3ef00c932ec4ce4198b6c6257a083384c8da752af" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.600876 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ae98645c6065995219b1cd3ef00c932ec4ce4198b6c6257a083384c8da752af"} err="failed to get container status \"2ae98645c6065995219b1cd3ef00c932ec4ce4198b6c6257a083384c8da752af\": rpc error: code = NotFound desc = could not find container \"2ae98645c6065995219b1cd3ef00c932ec4ce4198b6c6257a083384c8da752af\": container with ID starting with 2ae98645c6065995219b1cd3ef00c932ec4ce4198b6c6257a083384c8da752af not found: ID does not exist" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.600954 4938 scope.go:117] "RemoveContainer" containerID="85b90a09e2baca1ad1d9828b4c2e1c9b2b6d0d42652481d571093eddd024a8ac" Nov 22 11:13:29 crc kubenswrapper[4938]: E1122 11:13:29.601248 4938 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"85b90a09e2baca1ad1d9828b4c2e1c9b2b6d0d42652481d571093eddd024a8ac\": container with ID starting with 85b90a09e2baca1ad1d9828b4c2e1c9b2b6d0d42652481d571093eddd024a8ac not found: ID does not exist" containerID="85b90a09e2baca1ad1d9828b4c2e1c9b2b6d0d42652481d571093eddd024a8ac" Nov 22 11:13:29 crc kubenswrapper[4938]: I1122 11:13:29.601278 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85b90a09e2baca1ad1d9828b4c2e1c9b2b6d0d42652481d571093eddd024a8ac"} err="failed to get container status \"85b90a09e2baca1ad1d9828b4c2e1c9b2b6d0d42652481d571093eddd024a8ac\": rpc error: code = NotFound desc = could not find container \"85b90a09e2baca1ad1d9828b4c2e1c9b2b6d0d42652481d571093eddd024a8ac\": container with ID starting with 85b90a09e2baca1ad1d9828b4c2e1c9b2b6d0d42652481d571093eddd024a8ac not found: ID does not exist" Nov 22 11:13:30 crc kubenswrapper[4938]: I1122 11:13:30.457506 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" path="/var/lib/kubelet/pods/9c1ec982-1ecd-4341-9552-44f2b3da4f3a/volumes" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.529537 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7f4vd"] Nov 22 11:13:54 crc kubenswrapper[4938]: E1122 11:13:54.530698 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" containerName="extract-utilities" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.530716 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" containerName="extract-utilities" Nov 22 11:13:54 crc kubenswrapper[4938]: E1122 11:13:54.530736 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" containerName="extract-content" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.530743 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" containerName="extract-content" Nov 22 11:13:54 crc kubenswrapper[4938]: E1122 11:13:54.530752 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" containerName="registry-server" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.530760 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" containerName="registry-server" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.531007 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c1ec982-1ecd-4341-9552-44f2b3da4f3a" containerName="registry-server" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.532983 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.543073 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7f4vd"] Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.670809 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07112f77-6997-40a5-8cf2-76c44fdf5df2-catalog-content\") pod \"redhat-marketplace-7f4vd\" (UID: \"07112f77-6997-40a5-8cf2-76c44fdf5df2\") " pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.670962 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07112f77-6997-40a5-8cf2-76c44fdf5df2-utilities\") pod \"redhat-marketplace-7f4vd\" (UID: \"07112f77-6997-40a5-8cf2-76c44fdf5df2\") " pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.670991 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmvfd\" (UniqueName: \"kubernetes.io/projected/07112f77-6997-40a5-8cf2-76c44fdf5df2-kube-api-access-vmvfd\") pod \"redhat-marketplace-7f4vd\" (UID: \"07112f77-6997-40a5-8cf2-76c44fdf5df2\") " pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.772673 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07112f77-6997-40a5-8cf2-76c44fdf5df2-catalog-content\") pod \"redhat-marketplace-7f4vd\" (UID: \"07112f77-6997-40a5-8cf2-76c44fdf5df2\") " pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.773200 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07112f77-6997-40a5-8cf2-76c44fdf5df2-catalog-content\") pod \"redhat-marketplace-7f4vd\" (UID: \"07112f77-6997-40a5-8cf2-76c44fdf5df2\") " pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.773708 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07112f77-6997-40a5-8cf2-76c44fdf5df2-utilities\") pod \"redhat-marketplace-7f4vd\" (UID: \"07112f77-6997-40a5-8cf2-76c44fdf5df2\") " pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.773766 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmvfd\" (UniqueName: \"kubernetes.io/projected/07112f77-6997-40a5-8cf2-76c44fdf5df2-kube-api-access-vmvfd\") pod \"redhat-marketplace-7f4vd\" (UID: \"07112f77-6997-40a5-8cf2-76c44fdf5df2\") " pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.774047 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07112f77-6997-40a5-8cf2-76c44fdf5df2-utilities\") pod \"redhat-marketplace-7f4vd\" (UID: \"07112f77-6997-40a5-8cf2-76c44fdf5df2\") " pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.793807 4938 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-vmvfd\" (UniqueName: \"kubernetes.io/projected/07112f77-6997-40a5-8cf2-76c44fdf5df2-kube-api-access-vmvfd\") pod \"redhat-marketplace-7f4vd\" (UID: \"07112f77-6997-40a5-8cf2-76c44fdf5df2\") " pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:13:54 crc kubenswrapper[4938]: I1122 11:13:54.855134 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:13:55 crc kubenswrapper[4938]: I1122 11:13:55.280297 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7f4vd"] Nov 22 11:13:55 crc kubenswrapper[4938]: I1122 11:13:55.726091 4938 generic.go:334] "Generic (PLEG): container finished" podID="07112f77-6997-40a5-8cf2-76c44fdf5df2" containerID="b2f98a53ab2ed2690c4adf2bec097e02e43f6402c172642dc2e7f9d168433e45" exitCode=0 Nov 22 11:13:55 crc kubenswrapper[4938]: I1122 11:13:55.726153 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7f4vd" event={"ID":"07112f77-6997-40a5-8cf2-76c44fdf5df2","Type":"ContainerDied","Data":"b2f98a53ab2ed2690c4adf2bec097e02e43f6402c172642dc2e7f9d168433e45"} Nov 22 11:13:55 crc kubenswrapper[4938]: I1122 11:13:55.726387 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7f4vd" event={"ID":"07112f77-6997-40a5-8cf2-76c44fdf5df2","Type":"ContainerStarted","Data":"24b8682935221a2fdc2ef9603b48d44f139b6294adee214df81501eaa70a47c3"} Nov 22 11:13:56 crc kubenswrapper[4938]: I1122 11:13:56.739216 4938 generic.go:334] "Generic (PLEG): container finished" podID="07112f77-6997-40a5-8cf2-76c44fdf5df2" containerID="e376b7f59a9b3e46a989c45ecea5e2f634dfc6ae8ebe1d9cdcaa0e5a461fda47" exitCode=0 Nov 22 11:13:56 crc kubenswrapper[4938]: I1122 11:13:56.739278 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7f4vd" event={"ID":"07112f77-6997-40a5-8cf2-76c44fdf5df2","Type":"ContainerDied","Data":"e376b7f59a9b3e46a989c45ecea5e2f634dfc6ae8ebe1d9cdcaa0e5a461fda47"} Nov 22 11:13:58 crc kubenswrapper[4938]: I1122 11:13:58.757570 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7f4vd" event={"ID":"07112f77-6997-40a5-8cf2-76c44fdf5df2","Type":"ContainerStarted","Data":"56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66"} Nov 22 11:13:58 crc kubenswrapper[4938]: I1122 11:13:58.778518 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7f4vd" podStartSLOduration=2.500695782 podStartE2EDuration="4.778500479s" podCreationTimestamp="2025-11-22 11:13:54 +0000 UTC" firstStartedPulling="2025-11-22 11:13:55.727608549 +0000 UTC m=+2168.195445948" lastFinishedPulling="2025-11-22 11:13:58.005413246 +0000 UTC m=+2170.473250645" observedRunningTime="2025-11-22 11:13:58.7773414 +0000 UTC m=+2171.245178799" watchObservedRunningTime="2025-11-22 11:13:58.778500479 +0000 UTC m=+2171.246337878" Nov 22 11:14:04 crc kubenswrapper[4938]: I1122 11:14:04.857229 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:14:04 crc kubenswrapper[4938]: I1122 11:14:04.857808 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:14:04 crc kubenswrapper[4938]: I1122 11:14:04.912399 4938 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:14:05 crc kubenswrapper[4938]: I1122 11:14:05.875623 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:14:05 crc kubenswrapper[4938]: I1122 11:14:05.931514 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7f4vd"] Nov 22 11:14:07 crc kubenswrapper[4938]: I1122 11:14:07.841178 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7f4vd" podUID="07112f77-6997-40a5-8cf2-76c44fdf5df2" containerName="registry-server" containerID="cri-o://56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66" gracePeriod=2 Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.322162 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.452823 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07112f77-6997-40a5-8cf2-76c44fdf5df2-utilities\") pod \"07112f77-6997-40a5-8cf2-76c44fdf5df2\" (UID: \"07112f77-6997-40a5-8cf2-76c44fdf5df2\") " Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.452966 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmvfd\" (UniqueName: \"kubernetes.io/projected/07112f77-6997-40a5-8cf2-76c44fdf5df2-kube-api-access-vmvfd\") pod \"07112f77-6997-40a5-8cf2-76c44fdf5df2\" (UID: \"07112f77-6997-40a5-8cf2-76c44fdf5df2\") " Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.453007 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07112f77-6997-40a5-8cf2-76c44fdf5df2-catalog-content\") pod \"07112f77-6997-40a5-8cf2-76c44fdf5df2\" (UID: \"07112f77-6997-40a5-8cf2-76c44fdf5df2\") " Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.453992 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07112f77-6997-40a5-8cf2-76c44fdf5df2-utilities" (OuterVolumeSpecName: "utilities") pod "07112f77-6997-40a5-8cf2-76c44fdf5df2" (UID: "07112f77-6997-40a5-8cf2-76c44fdf5df2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.461129 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07112f77-6997-40a5-8cf2-76c44fdf5df2-kube-api-access-vmvfd" (OuterVolumeSpecName: "kube-api-access-vmvfd") pod "07112f77-6997-40a5-8cf2-76c44fdf5df2" (UID: "07112f77-6997-40a5-8cf2-76c44fdf5df2"). InnerVolumeSpecName "kube-api-access-vmvfd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.472065 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07112f77-6997-40a5-8cf2-76c44fdf5df2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07112f77-6997-40a5-8cf2-76c44fdf5df2" (UID: "07112f77-6997-40a5-8cf2-76c44fdf5df2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.556376 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07112f77-6997-40a5-8cf2-76c44fdf5df2-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.556507 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmvfd\" (UniqueName: \"kubernetes.io/projected/07112f77-6997-40a5-8cf2-76c44fdf5df2-kube-api-access-vmvfd\") on node \"crc\" DevicePath \"\"" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.556531 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07112f77-6997-40a5-8cf2-76c44fdf5df2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.852666 4938 generic.go:334] "Generic (PLEG): container finished" podID="07112f77-6997-40a5-8cf2-76c44fdf5df2" containerID="56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66" exitCode=0 Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.852701 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7f4vd" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.852707 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7f4vd" event={"ID":"07112f77-6997-40a5-8cf2-76c44fdf5df2","Type":"ContainerDied","Data":"56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66"} Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.852731 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7f4vd" event={"ID":"07112f77-6997-40a5-8cf2-76c44fdf5df2","Type":"ContainerDied","Data":"24b8682935221a2fdc2ef9603b48d44f139b6294adee214df81501eaa70a47c3"} Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.852747 4938 scope.go:117] "RemoveContainer" containerID="56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.887242 4938 scope.go:117] "RemoveContainer" containerID="e376b7f59a9b3e46a989c45ecea5e2f634dfc6ae8ebe1d9cdcaa0e5a461fda47" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.897551 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7f4vd"] Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.904717 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7f4vd"] Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.910193 4938 scope.go:117] "RemoveContainer" containerID="b2f98a53ab2ed2690c4adf2bec097e02e43f6402c172642dc2e7f9d168433e45" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.953875 4938 scope.go:117] "RemoveContainer" containerID="56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66" Nov 22 11:14:08 crc kubenswrapper[4938]: E1122 11:14:08.954696 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66\": container with ID starting with 56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66 not found: ID does not exist" containerID="56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.954737 4938 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66"} err="failed to get container status \"56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66\": rpc error: code = NotFound desc = could not find container \"56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66\": container with ID starting with 56a9dd0b31dea68abdbd3a5afb81d8a68c74b71ae19c3efdf3c7be811f6ede66 not found: ID does not exist" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.954765 4938 scope.go:117] "RemoveContainer" containerID="e376b7f59a9b3e46a989c45ecea5e2f634dfc6ae8ebe1d9cdcaa0e5a461fda47" Nov 22 11:14:08 crc kubenswrapper[4938]: E1122 11:14:08.956052 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e376b7f59a9b3e46a989c45ecea5e2f634dfc6ae8ebe1d9cdcaa0e5a461fda47\": container with ID starting with e376b7f59a9b3e46a989c45ecea5e2f634dfc6ae8ebe1d9cdcaa0e5a461fda47 not found: ID does not exist" containerID="e376b7f59a9b3e46a989c45ecea5e2f634dfc6ae8ebe1d9cdcaa0e5a461fda47" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.956093 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e376b7f59a9b3e46a989c45ecea5e2f634dfc6ae8ebe1d9cdcaa0e5a461fda47"} err="failed to get container status \"e376b7f59a9b3e46a989c45ecea5e2f634dfc6ae8ebe1d9cdcaa0e5a461fda47\": rpc error: code = NotFound desc = could not find container \"e376b7f59a9b3e46a989c45ecea5e2f634dfc6ae8ebe1d9cdcaa0e5a461fda47\": container with ID starting with e376b7f59a9b3e46a989c45ecea5e2f634dfc6ae8ebe1d9cdcaa0e5a461fda47 not found: ID does not exist" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.956121 4938 scope.go:117] "RemoveContainer" containerID="b2f98a53ab2ed2690c4adf2bec097e02e43f6402c172642dc2e7f9d168433e45" Nov 22 11:14:08 crc kubenswrapper[4938]: E1122 11:14:08.956432 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2f98a53ab2ed2690c4adf2bec097e02e43f6402c172642dc2e7f9d168433e45\": container with ID starting with b2f98a53ab2ed2690c4adf2bec097e02e43f6402c172642dc2e7f9d168433e45 not found: ID does not exist" containerID="b2f98a53ab2ed2690c4adf2bec097e02e43f6402c172642dc2e7f9d168433e45" Nov 22 11:14:08 crc kubenswrapper[4938]: I1122 11:14:08.956456 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2f98a53ab2ed2690c4adf2bec097e02e43f6402c172642dc2e7f9d168433e45"} err="failed to get container status \"b2f98a53ab2ed2690c4adf2bec097e02e43f6402c172642dc2e7f9d168433e45\": rpc error: code = NotFound desc = could not find container \"b2f98a53ab2ed2690c4adf2bec097e02e43f6402c172642dc2e7f9d168433e45\": container with ID starting with b2f98a53ab2ed2690c4adf2bec097e02e43f6402c172642dc2e7f9d168433e45 not found: ID does not exist" Nov 22 11:14:09 crc kubenswrapper[4938]: I1122 11:14:09.863973 4938 generic.go:334] "Generic (PLEG): container finished" podID="2dc63cc5-838b-4bdf-86fe-46ede44788b3" containerID="c9edbb167d1147a3a4ea67af5e09bd7d15ec6c34c1b1e937ca082de0170db6b6" exitCode=0 Nov 22 11:14:09 crc kubenswrapper[4938]: I1122 11:14:09.864320 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" 
event={"ID":"2dc63cc5-838b-4bdf-86fe-46ede44788b3","Type":"ContainerDied","Data":"c9edbb167d1147a3a4ea67af5e09bd7d15ec6c34c1b1e937ca082de0170db6b6"} Nov 22 11:14:10 crc kubenswrapper[4938]: I1122 11:14:10.460246 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07112f77-6997-40a5-8cf2-76c44fdf5df2" path="/var/lib/kubelet/pods/07112f77-6997-40a5-8cf2-76c44fdf5df2/volumes" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.247067 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.301793 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.301858 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.407955 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-neutron-ovn-metadata-agent-neutron-config-0\") pod \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.408107 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tff2d\" (UniqueName: \"kubernetes.io/projected/2dc63cc5-838b-4bdf-86fe-46ede44788b3-kube-api-access-tff2d\") pod \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.408180 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-inventory\") pod \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.408303 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-nova-metadata-neutron-config-0\") pod \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.408746 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-ssh-key\") pod \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\" (UID: \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.408810 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-neutron-metadata-combined-ca-bundle\") pod \"2dc63cc5-838b-4bdf-86fe-46ede44788b3\" (UID: 
\"2dc63cc5-838b-4bdf-86fe-46ede44788b3\") " Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.415190 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dc63cc5-838b-4bdf-86fe-46ede44788b3-kube-api-access-tff2d" (OuterVolumeSpecName: "kube-api-access-tff2d") pod "2dc63cc5-838b-4bdf-86fe-46ede44788b3" (UID: "2dc63cc5-838b-4bdf-86fe-46ede44788b3"). InnerVolumeSpecName "kube-api-access-tff2d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.425298 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "2dc63cc5-838b-4bdf-86fe-46ede44788b3" (UID: "2dc63cc5-838b-4bdf-86fe-46ede44788b3"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.438058 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "2dc63cc5-838b-4bdf-86fe-46ede44788b3" (UID: "2dc63cc5-838b-4bdf-86fe-46ede44788b3"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.438977 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "2dc63cc5-838b-4bdf-86fe-46ede44788b3" (UID: "2dc63cc5-838b-4bdf-86fe-46ede44788b3"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.439592 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2dc63cc5-838b-4bdf-86fe-46ede44788b3" (UID: "2dc63cc5-838b-4bdf-86fe-46ede44788b3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.447841 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-inventory" (OuterVolumeSpecName: "inventory") pod "2dc63cc5-838b-4bdf-86fe-46ede44788b3" (UID: "2dc63cc5-838b-4bdf-86fe-46ede44788b3"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.511092 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.511218 4938 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.511384 4938 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.511483 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tff2d\" (UniqueName: \"kubernetes.io/projected/2dc63cc5-838b-4bdf-86fe-46ede44788b3-kube-api-access-tff2d\") on node \"crc\" DevicePath \"\"" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.511563 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.511638 4938 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/2dc63cc5-838b-4bdf-86fe-46ede44788b3-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.891274 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" event={"ID":"2dc63cc5-838b-4bdf-86fe-46ede44788b3","Type":"ContainerDied","Data":"ec6db715e23e543402a5a8bd80695395a7fe36e8f62718f27ad4dee701b4ecd0"} Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.891315 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec6db715e23e543402a5a8bd80695395a7fe36e8f62718f27ad4dee701b4ecd0" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.891366 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.971998 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75"] Nov 22 11:14:11 crc kubenswrapper[4938]: E1122 11:14:11.972353 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dc63cc5-838b-4bdf-86fe-46ede44788b3" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.972374 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dc63cc5-838b-4bdf-86fe-46ede44788b3" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 22 11:14:11 crc kubenswrapper[4938]: E1122 11:14:11.972407 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07112f77-6997-40a5-8cf2-76c44fdf5df2" containerName="registry-server" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.972414 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="07112f77-6997-40a5-8cf2-76c44fdf5df2" containerName="registry-server" Nov 22 11:14:11 crc kubenswrapper[4938]: E1122 11:14:11.972428 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07112f77-6997-40a5-8cf2-76c44fdf5df2" containerName="extract-utilities" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.972434 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="07112f77-6997-40a5-8cf2-76c44fdf5df2" containerName="extract-utilities" Nov 22 11:14:11 crc kubenswrapper[4938]: E1122 11:14:11.972446 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07112f77-6997-40a5-8cf2-76c44fdf5df2" containerName="extract-content" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.972453 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="07112f77-6997-40a5-8cf2-76c44fdf5df2" containerName="extract-content" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.972617 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="07112f77-6997-40a5-8cf2-76c44fdf5df2" containerName="registry-server" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.972649 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dc63cc5-838b-4bdf-86fe-46ede44788b3" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.973367 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.977275 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.977571 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.977870 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.977977 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.978207 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:14:11 crc kubenswrapper[4938]: I1122 11:14:11.981329 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75"] Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.041758 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.041808 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp86g\" (UniqueName: \"kubernetes.io/projected/6b52293d-9695-46ab-8248-af8bb1a3c464-kube-api-access-qp86g\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.042037 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.042165 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.042403 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.144733 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.145122 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp86g\" (UniqueName: \"kubernetes.io/projected/6b52293d-9695-46ab-8248-af8bb1a3c464-kube-api-access-qp86g\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.145200 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.145253 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.145293 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.149466 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.149486 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.149517 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.149902 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-libvirt-secret-0\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.162342 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp86g\" (UniqueName: \"kubernetes.io/projected/6b52293d-9695-46ab-8248-af8bb1a3c464-kube-api-access-qp86g\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-gcd75\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.347286 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.881670 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75"] Nov 22 11:14:12 crc kubenswrapper[4938]: W1122 11:14:12.885063 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6b52293d_9695_46ab_8248_af8bb1a3c464.slice/crio-576183395fd82f5ddd22776efa62a83964bd2002487565ae81eba0fed8295e07 WatchSource:0}: Error finding container 576183395fd82f5ddd22776efa62a83964bd2002487565ae81eba0fed8295e07: Status 404 returned error can't find the container with id 576183395fd82f5ddd22776efa62a83964bd2002487565ae81eba0fed8295e07 Nov 22 11:14:12 crc kubenswrapper[4938]: I1122 11:14:12.902162 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" event={"ID":"6b52293d-9695-46ab-8248-af8bb1a3c464","Type":"ContainerStarted","Data":"576183395fd82f5ddd22776efa62a83964bd2002487565ae81eba0fed8295e07"} Nov 22 11:14:13 crc kubenswrapper[4938]: I1122 11:14:13.910641 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" event={"ID":"6b52293d-9695-46ab-8248-af8bb1a3c464","Type":"ContainerStarted","Data":"db69d1e9104a1bd2f97bc7b6417eac5dd5af162eb479792f63a390f9709800e3"} Nov 22 11:14:13 crc kubenswrapper[4938]: I1122 11:14:13.930284 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" podStartSLOduration=2.245838386 podStartE2EDuration="2.930265458s" podCreationTimestamp="2025-11-22 11:14:11 +0000 UTC" firstStartedPulling="2025-11-22 11:14:12.887332977 +0000 UTC m=+2185.355170386" lastFinishedPulling="2025-11-22 11:14:13.571760059 +0000 UTC m=+2186.039597458" observedRunningTime="2025-11-22 11:14:13.92354645 +0000 UTC m=+2186.391383859" watchObservedRunningTime="2025-11-22 11:14:13.930265458 +0000 UTC m=+2186.398102857" Nov 22 11:14:41 crc kubenswrapper[4938]: I1122 11:14:41.301096 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:14:41 crc kubenswrapper[4938]: I1122 11:14:41.301530 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial 
tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.148998 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n"] Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.151355 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.154534 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.155309 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.160489 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n"] Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.174354 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1256543b-1791-499d-b15a-bd42f019352c-secret-volume\") pod \"collect-profiles-29396835-ntb9n\" (UID: \"1256543b-1791-499d-b15a-bd42f019352c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.174442 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1256543b-1791-499d-b15a-bd42f019352c-config-volume\") pod \"collect-profiles-29396835-ntb9n\" (UID: \"1256543b-1791-499d-b15a-bd42f019352c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.174498 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tbqk\" (UniqueName: \"kubernetes.io/projected/1256543b-1791-499d-b15a-bd42f019352c-kube-api-access-7tbqk\") pod \"collect-profiles-29396835-ntb9n\" (UID: \"1256543b-1791-499d-b15a-bd42f019352c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.276068 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1256543b-1791-499d-b15a-bd42f019352c-secret-volume\") pod \"collect-profiles-29396835-ntb9n\" (UID: \"1256543b-1791-499d-b15a-bd42f019352c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.276206 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1256543b-1791-499d-b15a-bd42f019352c-config-volume\") pod \"collect-profiles-29396835-ntb9n\" (UID: \"1256543b-1791-499d-b15a-bd42f019352c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.276258 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tbqk\" (UniqueName: \"kubernetes.io/projected/1256543b-1791-499d-b15a-bd42f019352c-kube-api-access-7tbqk\") pod \"collect-profiles-29396835-ntb9n\" (UID: 
\"1256543b-1791-499d-b15a-bd42f019352c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.278303 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1256543b-1791-499d-b15a-bd42f019352c-config-volume\") pod \"collect-profiles-29396835-ntb9n\" (UID: \"1256543b-1791-499d-b15a-bd42f019352c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.284552 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1256543b-1791-499d-b15a-bd42f019352c-secret-volume\") pod \"collect-profiles-29396835-ntb9n\" (UID: \"1256543b-1791-499d-b15a-bd42f019352c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.294068 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tbqk\" (UniqueName: \"kubernetes.io/projected/1256543b-1791-499d-b15a-bd42f019352c-kube-api-access-7tbqk\") pod \"collect-profiles-29396835-ntb9n\" (UID: \"1256543b-1791-499d-b15a-bd42f019352c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.481963 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:00 crc kubenswrapper[4938]: I1122 11:15:00.928354 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n"] Nov 22 11:15:01 crc kubenswrapper[4938]: I1122 11:15:01.332962 4938 generic.go:334] "Generic (PLEG): container finished" podID="1256543b-1791-499d-b15a-bd42f019352c" containerID="cf64aaa2d8120441c2833adb5ccc41919d8f144e03910120ccf90ea5aeadabb4" exitCode=0 Nov 22 11:15:01 crc kubenswrapper[4938]: I1122 11:15:01.333046 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" event={"ID":"1256543b-1791-499d-b15a-bd42f019352c","Type":"ContainerDied","Data":"cf64aaa2d8120441c2833adb5ccc41919d8f144e03910120ccf90ea5aeadabb4"} Nov 22 11:15:01 crc kubenswrapper[4938]: I1122 11:15:01.333328 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" event={"ID":"1256543b-1791-499d-b15a-bd42f019352c","Type":"ContainerStarted","Data":"7eacee5b8f2a21258fcaec418ea22e5dabded08a0c6b5d9cacc961e670e65073"} Nov 22 11:15:02 crc kubenswrapper[4938]: I1122 11:15:02.627009 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:02 crc kubenswrapper[4938]: I1122 11:15:02.822111 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tbqk\" (UniqueName: \"kubernetes.io/projected/1256543b-1791-499d-b15a-bd42f019352c-kube-api-access-7tbqk\") pod \"1256543b-1791-499d-b15a-bd42f019352c\" (UID: \"1256543b-1791-499d-b15a-bd42f019352c\") " Nov 22 11:15:02 crc kubenswrapper[4938]: I1122 11:15:02.822261 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1256543b-1791-499d-b15a-bd42f019352c-config-volume\") pod \"1256543b-1791-499d-b15a-bd42f019352c\" (UID: \"1256543b-1791-499d-b15a-bd42f019352c\") " Nov 22 11:15:02 crc kubenswrapper[4938]: I1122 11:15:02.822375 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1256543b-1791-499d-b15a-bd42f019352c-secret-volume\") pod \"1256543b-1791-499d-b15a-bd42f019352c\" (UID: \"1256543b-1791-499d-b15a-bd42f019352c\") " Nov 22 11:15:02 crc kubenswrapper[4938]: I1122 11:15:02.823292 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1256543b-1791-499d-b15a-bd42f019352c-config-volume" (OuterVolumeSpecName: "config-volume") pod "1256543b-1791-499d-b15a-bd42f019352c" (UID: "1256543b-1791-499d-b15a-bd42f019352c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:15:02 crc kubenswrapper[4938]: I1122 11:15:02.829412 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1256543b-1791-499d-b15a-bd42f019352c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1256543b-1791-499d-b15a-bd42f019352c" (UID: "1256543b-1791-499d-b15a-bd42f019352c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:15:02 crc kubenswrapper[4938]: I1122 11:15:02.829562 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1256543b-1791-499d-b15a-bd42f019352c-kube-api-access-7tbqk" (OuterVolumeSpecName: "kube-api-access-7tbqk") pod "1256543b-1791-499d-b15a-bd42f019352c" (UID: "1256543b-1791-499d-b15a-bd42f019352c"). InnerVolumeSpecName "kube-api-access-7tbqk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:15:02 crc kubenswrapper[4938]: I1122 11:15:02.924706 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tbqk\" (UniqueName: \"kubernetes.io/projected/1256543b-1791-499d-b15a-bd42f019352c-kube-api-access-7tbqk\") on node \"crc\" DevicePath \"\"" Nov 22 11:15:02 crc kubenswrapper[4938]: I1122 11:15:02.925049 4938 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1256543b-1791-499d-b15a-bd42f019352c-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:15:02 crc kubenswrapper[4938]: I1122 11:15:02.925119 4938 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1256543b-1791-499d-b15a-bd42f019352c-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:15:03 crc kubenswrapper[4938]: I1122 11:15:03.348600 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" event={"ID":"1256543b-1791-499d-b15a-bd42f019352c","Type":"ContainerDied","Data":"7eacee5b8f2a21258fcaec418ea22e5dabded08a0c6b5d9cacc961e670e65073"} Nov 22 11:15:03 crc kubenswrapper[4938]: I1122 11:15:03.348636 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-ntb9n" Nov 22 11:15:03 crc kubenswrapper[4938]: I1122 11:15:03.348640 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7eacee5b8f2a21258fcaec418ea22e5dabded08a0c6b5d9cacc961e670e65073" Nov 22 11:15:03 crc kubenswrapper[4938]: I1122 11:15:03.694600 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n"] Nov 22 11:15:03 crc kubenswrapper[4938]: I1122 11:15:03.701278 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396790-q5d2n"] Nov 22 11:15:04 crc kubenswrapper[4938]: I1122 11:15:04.456930 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d2cdfd7-9190-4322-81d6-cf73f4815c8c" path="/var/lib/kubelet/pods/3d2cdfd7-9190-4322-81d6-cf73f4815c8c/volumes" Nov 22 11:15:11 crc kubenswrapper[4938]: I1122 11:15:11.300637 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:15:11 crc kubenswrapper[4938]: I1122 11:15:11.301141 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:15:11 crc kubenswrapper[4938]: I1122 11:15:11.301186 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 11:15:11 crc kubenswrapper[4938]: I1122 11:15:11.301852 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1"} 
pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:15:11 crc kubenswrapper[4938]: I1122 11:15:11.301923 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" gracePeriod=600 Nov 22 11:15:11 crc kubenswrapper[4938]: E1122 11:15:11.468294 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:15:12 crc kubenswrapper[4938]: I1122 11:15:12.435166 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" exitCode=0 Nov 22 11:15:12 crc kubenswrapper[4938]: I1122 11:15:12.435320 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1"} Nov 22 11:15:12 crc kubenswrapper[4938]: I1122 11:15:12.435557 4938 scope.go:117] "RemoveContainer" containerID="354b7cee0faf75c6141560f2e8431b8d74b396591ebde1b2d4ea604134123035" Nov 22 11:15:12 crc kubenswrapper[4938]: I1122 11:15:12.436352 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:15:12 crc kubenswrapper[4938]: E1122 11:15:12.436694 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:15:23 crc kubenswrapper[4938]: I1122 11:15:23.447613 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:15:23 crc kubenswrapper[4938]: E1122 11:15:23.448886 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:15:26 crc kubenswrapper[4938]: I1122 11:15:26.117937 4938 scope.go:117] "RemoveContainer" containerID="bfaf8526044e1feb6345834ea24d8521f5977f572a78d06001135fbcd9f73a02" Nov 22 11:15:35 crc kubenswrapper[4938]: I1122 11:15:35.447313 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:15:35 crc 
kubenswrapper[4938]: E1122 11:15:35.448114 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:15:46 crc kubenswrapper[4938]: I1122 11:15:46.447903 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:15:46 crc kubenswrapper[4938]: E1122 11:15:46.448722 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:15:57 crc kubenswrapper[4938]: I1122 11:15:57.447625 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:15:57 crc kubenswrapper[4938]: E1122 11:15:57.448466 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:16:12 crc kubenswrapper[4938]: I1122 11:16:12.447847 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:16:12 crc kubenswrapper[4938]: E1122 11:16:12.448615 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:16:26 crc kubenswrapper[4938]: I1122 11:16:26.447297 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:16:26 crc kubenswrapper[4938]: E1122 11:16:26.448270 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:16:37 crc kubenswrapper[4938]: I1122 11:16:37.447767 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:16:37 crc kubenswrapper[4938]: E1122 11:16:37.448722 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:16:48 crc kubenswrapper[4938]: I1122 11:16:48.453158 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:16:48 crc kubenswrapper[4938]: E1122 11:16:48.453860 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:17:02 crc kubenswrapper[4938]: I1122 11:17:02.448745 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:17:02 crc kubenswrapper[4938]: E1122 11:17:02.449650 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:17:16 crc kubenswrapper[4938]: I1122 11:17:16.447934 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:17:16 crc kubenswrapper[4938]: E1122 11:17:16.448618 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:17:27 crc kubenswrapper[4938]: I1122 11:17:27.447580 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:17:27 crc kubenswrapper[4938]: E1122 11:17:27.448460 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.470336 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2gcfn"] Nov 22 11:17:34 crc kubenswrapper[4938]: E1122 11:17:34.471287 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1256543b-1791-499d-b15a-bd42f019352c" containerName="collect-profiles" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.471305 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="1256543b-1791-499d-b15a-bd42f019352c" containerName="collect-profiles" Nov 22 
11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.471584 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="1256543b-1791-499d-b15a-bd42f019352c" containerName="collect-profiles" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.473223 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.473780 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2gcfn"] Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.527605 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-utilities\") pod \"certified-operators-2gcfn\" (UID: \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\") " pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.527773 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22w22\" (UniqueName: \"kubernetes.io/projected/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-kube-api-access-22w22\") pod \"certified-operators-2gcfn\" (UID: \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\") " pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.527815 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-catalog-content\") pod \"certified-operators-2gcfn\" (UID: \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\") " pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.630101 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-utilities\") pod \"certified-operators-2gcfn\" (UID: \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\") " pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.630512 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22w22\" (UniqueName: \"kubernetes.io/projected/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-kube-api-access-22w22\") pod \"certified-operators-2gcfn\" (UID: \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\") " pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.630667 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-utilities\") pod \"certified-operators-2gcfn\" (UID: \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\") " pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.630686 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-catalog-content\") pod \"certified-operators-2gcfn\" (UID: \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\") " pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.631300 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-catalog-content\") pod \"certified-operators-2gcfn\" (UID: \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\") " pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.662187 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22w22\" (UniqueName: \"kubernetes.io/projected/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-kube-api-access-22w22\") pod \"certified-operators-2gcfn\" (UID: \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\") " pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:34 crc kubenswrapper[4938]: I1122 11:17:34.827766 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:35 crc kubenswrapper[4938]: I1122 11:17:35.137119 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2gcfn"] Nov 22 11:17:35 crc kubenswrapper[4938]: I1122 11:17:35.655786 4938 generic.go:334] "Generic (PLEG): container finished" podID="68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" containerID="80dbe4c02396d2ba9fa67b11f16a5be9b5b054e93473bf9b31b1ed8ecaffee20" exitCode=0 Nov 22 11:17:35 crc kubenswrapper[4938]: I1122 11:17:35.655897 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gcfn" event={"ID":"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f","Type":"ContainerDied","Data":"80dbe4c02396d2ba9fa67b11f16a5be9b5b054e93473bf9b31b1ed8ecaffee20"} Nov 22 11:17:35 crc kubenswrapper[4938]: I1122 11:17:35.656303 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gcfn" event={"ID":"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f","Type":"ContainerStarted","Data":"bcd15ae9d7b33546310db8b20450951cc5db7f775aec0e2c5cd6ab88347e62d5"} Nov 22 11:17:35 crc kubenswrapper[4938]: I1122 11:17:35.657666 4938 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:17:36 crc kubenswrapper[4938]: I1122 11:17:36.665174 4938 generic.go:334] "Generic (PLEG): container finished" podID="68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" containerID="10582a2eda64ec289becd6fe23946447fb21d84d4b8ffd93cb698d9b4a49c3e6" exitCode=0 Nov 22 11:17:36 crc kubenswrapper[4938]: I1122 11:17:36.665252 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gcfn" event={"ID":"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f","Type":"ContainerDied","Data":"10582a2eda64ec289becd6fe23946447fb21d84d4b8ffd93cb698d9b4a49c3e6"} Nov 22 11:17:37 crc kubenswrapper[4938]: I1122 11:17:37.690479 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gcfn" event={"ID":"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f","Type":"ContainerStarted","Data":"305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0"} Nov 22 11:17:37 crc kubenswrapper[4938]: I1122 11:17:37.706423 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2gcfn" podStartSLOduration=1.929113451 podStartE2EDuration="3.706408391s" podCreationTimestamp="2025-11-22 11:17:34 +0000 UTC" firstStartedPulling="2025-11-22 11:17:35.657393942 +0000 UTC m=+2388.125231341" lastFinishedPulling="2025-11-22 11:17:37.434688872 +0000 UTC m=+2389.902526281" observedRunningTime="2025-11-22 11:17:37.704404542 +0000 UTC m=+2390.172241941" watchObservedRunningTime="2025-11-22 
11:17:37.706408391 +0000 UTC m=+2390.174245790" Nov 22 11:17:41 crc kubenswrapper[4938]: I1122 11:17:41.448044 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:17:41 crc kubenswrapper[4938]: E1122 11:17:41.448964 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:17:44 crc kubenswrapper[4938]: I1122 11:17:44.828426 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:44 crc kubenswrapper[4938]: I1122 11:17:44.828968 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:44 crc kubenswrapper[4938]: I1122 11:17:44.876767 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:45 crc kubenswrapper[4938]: I1122 11:17:45.797866 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:45 crc kubenswrapper[4938]: I1122 11:17:45.837714 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2gcfn"] Nov 22 11:17:47 crc kubenswrapper[4938]: I1122 11:17:47.767474 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2gcfn" podUID="68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" containerName="registry-server" containerID="cri-o://305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0" gracePeriod=2 Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.333783 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.435086 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22w22\" (UniqueName: \"kubernetes.io/projected/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-kube-api-access-22w22\") pod \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\" (UID: \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\") " Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.435289 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-catalog-content\") pod \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\" (UID: \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\") " Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.435319 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-utilities\") pod \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\" (UID: \"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f\") " Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.436266 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-utilities" (OuterVolumeSpecName: "utilities") pod "68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" (UID: "68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.437552 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.443023 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-kube-api-access-22w22" (OuterVolumeSpecName: "kube-api-access-22w22") pod "68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" (UID: "68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f"). InnerVolumeSpecName "kube-api-access-22w22". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.486461 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" (UID: "68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.540227 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22w22\" (UniqueName: \"kubernetes.io/projected/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-kube-api-access-22w22\") on node \"crc\" DevicePath \"\"" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.540275 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.778489 4938 generic.go:334] "Generic (PLEG): container finished" podID="68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" containerID="305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0" exitCode=0 Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.778537 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gcfn" event={"ID":"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f","Type":"ContainerDied","Data":"305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0"} Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.778572 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gcfn" event={"ID":"68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f","Type":"ContainerDied","Data":"bcd15ae9d7b33546310db8b20450951cc5db7f775aec0e2c5cd6ab88347e62d5"} Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.778592 4938 scope.go:117] "RemoveContainer" containerID="305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.778733 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2gcfn" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.818220 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2gcfn"] Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.824056 4938 scope.go:117] "RemoveContainer" containerID="10582a2eda64ec289becd6fe23946447fb21d84d4b8ffd93cb698d9b4a49c3e6" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.857102 4938 scope.go:117] "RemoveContainer" containerID="80dbe4c02396d2ba9fa67b11f16a5be9b5b054e93473bf9b31b1ed8ecaffee20" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.857680 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2gcfn"] Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.889440 4938 scope.go:117] "RemoveContainer" containerID="305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0" Nov 22 11:17:48 crc kubenswrapper[4938]: E1122 11:17:48.890241 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0\": container with ID starting with 305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0 not found: ID does not exist" containerID="305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.890283 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0"} err="failed to get container status \"305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0\": rpc error: code = NotFound desc = could not find container \"305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0\": container with ID starting with 305b62eb4fd29602b9b513242b0863f66295c20dfeb90b7ae2fa145727102fd0 not found: ID does not exist" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.890312 4938 scope.go:117] "RemoveContainer" containerID="10582a2eda64ec289becd6fe23946447fb21d84d4b8ffd93cb698d9b4a49c3e6" Nov 22 11:17:48 crc kubenswrapper[4938]: E1122 11:17:48.890652 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10582a2eda64ec289becd6fe23946447fb21d84d4b8ffd93cb698d9b4a49c3e6\": container with ID starting with 10582a2eda64ec289becd6fe23946447fb21d84d4b8ffd93cb698d9b4a49c3e6 not found: ID does not exist" containerID="10582a2eda64ec289becd6fe23946447fb21d84d4b8ffd93cb698d9b4a49c3e6" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.890686 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10582a2eda64ec289becd6fe23946447fb21d84d4b8ffd93cb698d9b4a49c3e6"} err="failed to get container status \"10582a2eda64ec289becd6fe23946447fb21d84d4b8ffd93cb698d9b4a49c3e6\": rpc error: code = NotFound desc = could not find container \"10582a2eda64ec289becd6fe23946447fb21d84d4b8ffd93cb698d9b4a49c3e6\": container with ID starting with 10582a2eda64ec289becd6fe23946447fb21d84d4b8ffd93cb698d9b4a49c3e6 not found: ID does not exist" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.890709 4938 scope.go:117] "RemoveContainer" containerID="80dbe4c02396d2ba9fa67b11f16a5be9b5b054e93473bf9b31b1ed8ecaffee20" Nov 22 11:17:48 crc kubenswrapper[4938]: E1122 11:17:48.891028 4938 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"80dbe4c02396d2ba9fa67b11f16a5be9b5b054e93473bf9b31b1ed8ecaffee20\": container with ID starting with 80dbe4c02396d2ba9fa67b11f16a5be9b5b054e93473bf9b31b1ed8ecaffee20 not found: ID does not exist" containerID="80dbe4c02396d2ba9fa67b11f16a5be9b5b054e93473bf9b31b1ed8ecaffee20" Nov 22 11:17:48 crc kubenswrapper[4938]: I1122 11:17:48.891054 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80dbe4c02396d2ba9fa67b11f16a5be9b5b054e93473bf9b31b1ed8ecaffee20"} err="failed to get container status \"80dbe4c02396d2ba9fa67b11f16a5be9b5b054e93473bf9b31b1ed8ecaffee20\": rpc error: code = NotFound desc = could not find container \"80dbe4c02396d2ba9fa67b11f16a5be9b5b054e93473bf9b31b1ed8ecaffee20\": container with ID starting with 80dbe4c02396d2ba9fa67b11f16a5be9b5b054e93473bf9b31b1ed8ecaffee20 not found: ID does not exist" Nov 22 11:17:50 crc kubenswrapper[4938]: I1122 11:17:50.460577 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" path="/var/lib/kubelet/pods/68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f/volumes" Nov 22 11:17:52 crc kubenswrapper[4938]: I1122 11:17:52.448327 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:17:52 crc kubenswrapper[4938]: E1122 11:17:52.448871 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.164507 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-68gxs"] Nov 22 11:17:56 crc kubenswrapper[4938]: E1122 11:17:56.165573 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" containerName="extract-content" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.165592 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" containerName="extract-content" Nov 22 11:17:56 crc kubenswrapper[4938]: E1122 11:17:56.165625 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" containerName="extract-utilities" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.165633 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" containerName="extract-utilities" Nov 22 11:17:56 crc kubenswrapper[4938]: E1122 11:17:56.165646 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" containerName="registry-server" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.165654 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" containerName="registry-server" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.165859 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="68d5ffdd-bc2e-41a4-b46b-9e00f8fef32f" containerName="registry-server" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.167707 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.183759 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-68gxs"] Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.282874 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-utilities\") pod \"community-operators-68gxs\" (UID: \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\") " pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.283189 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwrv9\" (UniqueName: \"kubernetes.io/projected/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-kube-api-access-bwrv9\") pod \"community-operators-68gxs\" (UID: \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\") " pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.283310 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-catalog-content\") pod \"community-operators-68gxs\" (UID: \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\") " pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.386494 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-utilities\") pod \"community-operators-68gxs\" (UID: \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\") " pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.386674 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwrv9\" (UniqueName: \"kubernetes.io/projected/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-kube-api-access-bwrv9\") pod \"community-operators-68gxs\" (UID: \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\") " pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.386714 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-catalog-content\") pod \"community-operators-68gxs\" (UID: \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\") " pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.387217 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-utilities\") pod \"community-operators-68gxs\" (UID: \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\") " pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.387433 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-catalog-content\") pod \"community-operators-68gxs\" (UID: \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\") " pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.411321 4938 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bwrv9\" (UniqueName: \"kubernetes.io/projected/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-kube-api-access-bwrv9\") pod \"community-operators-68gxs\" (UID: \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\") " pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:17:56 crc kubenswrapper[4938]: I1122 11:17:56.486007 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:17:57 crc kubenswrapper[4938]: I1122 11:17:57.059764 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-68gxs"] Nov 22 11:17:57 crc kubenswrapper[4938]: W1122 11:17:57.070820 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5a0db44_c7d3_49b0_a6fa_0a2d0d0e82cc.slice/crio-5b305b50b07793777df81fbcf4d3b9157c1d5569c4eff2f5b368e9a671e01dd5 WatchSource:0}: Error finding container 5b305b50b07793777df81fbcf4d3b9157c1d5569c4eff2f5b368e9a671e01dd5: Status 404 returned error can't find the container with id 5b305b50b07793777df81fbcf4d3b9157c1d5569c4eff2f5b368e9a671e01dd5 Nov 22 11:17:57 crc kubenswrapper[4938]: I1122 11:17:57.860017 4938 generic.go:334] "Generic (PLEG): container finished" podID="e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" containerID="d28fad434d0f3c5d47a20b75e999bedb0363f20311a49fc076c29e29cadad87b" exitCode=0 Nov 22 11:17:57 crc kubenswrapper[4938]: I1122 11:17:57.860210 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-68gxs" event={"ID":"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc","Type":"ContainerDied","Data":"d28fad434d0f3c5d47a20b75e999bedb0363f20311a49fc076c29e29cadad87b"} Nov 22 11:17:57 crc kubenswrapper[4938]: I1122 11:17:57.860333 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-68gxs" event={"ID":"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc","Type":"ContainerStarted","Data":"5b305b50b07793777df81fbcf4d3b9157c1d5569c4eff2f5b368e9a671e01dd5"} Nov 22 11:17:58 crc kubenswrapper[4938]: I1122 11:17:58.874377 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-68gxs" event={"ID":"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc","Type":"ContainerStarted","Data":"f262d366f92741401303d5f5051bc353ebb8c2bf90d850093f8c0536fee4de17"} Nov 22 11:17:59 crc kubenswrapper[4938]: I1122 11:17:59.888200 4938 generic.go:334] "Generic (PLEG): container finished" podID="e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" containerID="f262d366f92741401303d5f5051bc353ebb8c2bf90d850093f8c0536fee4de17" exitCode=0 Nov 22 11:17:59 crc kubenswrapper[4938]: I1122 11:17:59.888681 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-68gxs" event={"ID":"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc","Type":"ContainerDied","Data":"f262d366f92741401303d5f5051bc353ebb8c2bf90d850093f8c0536fee4de17"} Nov 22 11:18:00 crc kubenswrapper[4938]: I1122 11:18:00.899106 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-68gxs" event={"ID":"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc","Type":"ContainerStarted","Data":"172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159"} Nov 22 11:18:00 crc kubenswrapper[4938]: I1122 11:18:00.924850 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-68gxs" 
podStartSLOduration=2.168309552 podStartE2EDuration="4.924830716s" podCreationTimestamp="2025-11-22 11:17:56 +0000 UTC" firstStartedPulling="2025-11-22 11:17:57.861701705 +0000 UTC m=+2410.329539104" lastFinishedPulling="2025-11-22 11:18:00.618222859 +0000 UTC m=+2413.086060268" observedRunningTime="2025-11-22 11:18:00.917231577 +0000 UTC m=+2413.385068976" watchObservedRunningTime="2025-11-22 11:18:00.924830716 +0000 UTC m=+2413.392668115" Nov 22 11:18:06 crc kubenswrapper[4938]: I1122 11:18:06.486965 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:18:06 crc kubenswrapper[4938]: I1122 11:18:06.488231 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:18:06 crc kubenswrapper[4938]: I1122 11:18:06.541309 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:18:07 crc kubenswrapper[4938]: I1122 11:18:07.003571 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:18:07 crc kubenswrapper[4938]: I1122 11:18:07.052029 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-68gxs"] Nov 22 11:18:07 crc kubenswrapper[4938]: I1122 11:18:07.447824 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:18:07 crc kubenswrapper[4938]: E1122 11:18:07.448378 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:18:08 crc kubenswrapper[4938]: I1122 11:18:08.983583 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-68gxs" podUID="e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" containerName="registry-server" containerID="cri-o://172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159" gracePeriod=2 Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.401609 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.455746 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-utilities\") pod \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\" (UID: \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\") " Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.455791 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwrv9\" (UniqueName: \"kubernetes.io/projected/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-kube-api-access-bwrv9\") pod \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\" (UID: \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\") " Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.455866 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-catalog-content\") pod \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\" (UID: \"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc\") " Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.456727 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-utilities" (OuterVolumeSpecName: "utilities") pod "e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" (UID: "e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.461042 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-kube-api-access-bwrv9" (OuterVolumeSpecName: "kube-api-access-bwrv9") pod "e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" (UID: "e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc"). InnerVolumeSpecName "kube-api-access-bwrv9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.508033 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" (UID: "e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.557668 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.557700 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwrv9\" (UniqueName: \"kubernetes.io/projected/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-kube-api-access-bwrv9\") on node \"crc\" DevicePath \"\"" Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.557713 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.991521 4938 generic.go:334] "Generic (PLEG): container finished" podID="e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" containerID="172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159" exitCode=0 Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.991561 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-68gxs" event={"ID":"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc","Type":"ContainerDied","Data":"172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159"} Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.991587 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-68gxs" event={"ID":"e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc","Type":"ContainerDied","Data":"5b305b50b07793777df81fbcf4d3b9157c1d5569c4eff2f5b368e9a671e01dd5"} Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.991605 4938 scope.go:117] "RemoveContainer" containerID="172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159" Nov 22 11:18:09 crc kubenswrapper[4938]: I1122 11:18:09.991724 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-68gxs" Nov 22 11:18:10 crc kubenswrapper[4938]: I1122 11:18:10.011431 4938 scope.go:117] "RemoveContainer" containerID="f262d366f92741401303d5f5051bc353ebb8c2bf90d850093f8c0536fee4de17" Nov 22 11:18:10 crc kubenswrapper[4938]: I1122 11:18:10.024481 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-68gxs"] Nov 22 11:18:10 crc kubenswrapper[4938]: I1122 11:18:10.033851 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-68gxs"] Nov 22 11:18:10 crc kubenswrapper[4938]: I1122 11:18:10.051808 4938 scope.go:117] "RemoveContainer" containerID="d28fad434d0f3c5d47a20b75e999bedb0363f20311a49fc076c29e29cadad87b" Nov 22 11:18:10 crc kubenswrapper[4938]: I1122 11:18:10.087250 4938 scope.go:117] "RemoveContainer" containerID="172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159" Nov 22 11:18:10 crc kubenswrapper[4938]: E1122 11:18:10.087836 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159\": container with ID starting with 172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159 not found: ID does not exist" containerID="172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159" Nov 22 11:18:10 crc kubenswrapper[4938]: I1122 11:18:10.087973 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159"} err="failed to get container status \"172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159\": rpc error: code = NotFound desc = could not find container \"172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159\": container with ID starting with 172c2c9b1047490a6d6ea2efbe715373477e4113b2379a8392c3e76040628159 not found: ID does not exist" Nov 22 11:18:10 crc kubenswrapper[4938]: I1122 11:18:10.088074 4938 scope.go:117] "RemoveContainer" containerID="f262d366f92741401303d5f5051bc353ebb8c2bf90d850093f8c0536fee4de17" Nov 22 11:18:10 crc kubenswrapper[4938]: E1122 11:18:10.088488 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f262d366f92741401303d5f5051bc353ebb8c2bf90d850093f8c0536fee4de17\": container with ID starting with f262d366f92741401303d5f5051bc353ebb8c2bf90d850093f8c0536fee4de17 not found: ID does not exist" containerID="f262d366f92741401303d5f5051bc353ebb8c2bf90d850093f8c0536fee4de17" Nov 22 11:18:10 crc kubenswrapper[4938]: I1122 11:18:10.088608 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f262d366f92741401303d5f5051bc353ebb8c2bf90d850093f8c0536fee4de17"} err="failed to get container status \"f262d366f92741401303d5f5051bc353ebb8c2bf90d850093f8c0536fee4de17\": rpc error: code = NotFound desc = could not find container \"f262d366f92741401303d5f5051bc353ebb8c2bf90d850093f8c0536fee4de17\": container with ID starting with f262d366f92741401303d5f5051bc353ebb8c2bf90d850093f8c0536fee4de17 not found: ID does not exist" Nov 22 11:18:10 crc kubenswrapper[4938]: I1122 11:18:10.088726 4938 scope.go:117] "RemoveContainer" containerID="d28fad434d0f3c5d47a20b75e999bedb0363f20311a49fc076c29e29cadad87b" Nov 22 11:18:10 crc kubenswrapper[4938]: E1122 11:18:10.089167 4938 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d28fad434d0f3c5d47a20b75e999bedb0363f20311a49fc076c29e29cadad87b\": container with ID starting with d28fad434d0f3c5d47a20b75e999bedb0363f20311a49fc076c29e29cadad87b not found: ID does not exist" containerID="d28fad434d0f3c5d47a20b75e999bedb0363f20311a49fc076c29e29cadad87b" Nov 22 11:18:10 crc kubenswrapper[4938]: I1122 11:18:10.089197 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d28fad434d0f3c5d47a20b75e999bedb0363f20311a49fc076c29e29cadad87b"} err="failed to get container status \"d28fad434d0f3c5d47a20b75e999bedb0363f20311a49fc076c29e29cadad87b\": rpc error: code = NotFound desc = could not find container \"d28fad434d0f3c5d47a20b75e999bedb0363f20311a49fc076c29e29cadad87b\": container with ID starting with d28fad434d0f3c5d47a20b75e999bedb0363f20311a49fc076c29e29cadad87b not found: ID does not exist" Nov 22 11:18:10 crc kubenswrapper[4938]: I1122 11:18:10.456855 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" path="/var/lib/kubelet/pods/e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc/volumes" Nov 22 11:18:19 crc kubenswrapper[4938]: I1122 11:18:19.447521 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:18:19 crc kubenswrapper[4938]: E1122 11:18:19.448225 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:18:23 crc kubenswrapper[4938]: I1122 11:18:23.105892 4938 generic.go:334] "Generic (PLEG): container finished" podID="6b52293d-9695-46ab-8248-af8bb1a3c464" containerID="db69d1e9104a1bd2f97bc7b6417eac5dd5af162eb479792f63a390f9709800e3" exitCode=0 Nov 22 11:18:23 crc kubenswrapper[4938]: I1122 11:18:23.105972 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" event={"ID":"6b52293d-9695-46ab-8248-af8bb1a3c464","Type":"ContainerDied","Data":"db69d1e9104a1bd2f97bc7b6417eac5dd5af162eb479792f63a390f9709800e3"} Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.525112 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.645497 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-ssh-key\") pod \"6b52293d-9695-46ab-8248-af8bb1a3c464\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.645561 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-inventory\") pod \"6b52293d-9695-46ab-8248-af8bb1a3c464\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.645612 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qp86g\" (UniqueName: \"kubernetes.io/projected/6b52293d-9695-46ab-8248-af8bb1a3c464-kube-api-access-qp86g\") pod \"6b52293d-9695-46ab-8248-af8bb1a3c464\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.645634 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-libvirt-combined-ca-bundle\") pod \"6b52293d-9695-46ab-8248-af8bb1a3c464\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.645721 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-libvirt-secret-0\") pod \"6b52293d-9695-46ab-8248-af8bb1a3c464\" (UID: \"6b52293d-9695-46ab-8248-af8bb1a3c464\") " Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.652344 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b52293d-9695-46ab-8248-af8bb1a3c464-kube-api-access-qp86g" (OuterVolumeSpecName: "kube-api-access-qp86g") pod "6b52293d-9695-46ab-8248-af8bb1a3c464" (UID: "6b52293d-9695-46ab-8248-af8bb1a3c464"). InnerVolumeSpecName "kube-api-access-qp86g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.656209 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "6b52293d-9695-46ab-8248-af8bb1a3c464" (UID: "6b52293d-9695-46ab-8248-af8bb1a3c464"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.674109 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "6b52293d-9695-46ab-8248-af8bb1a3c464" (UID: "6b52293d-9695-46ab-8248-af8bb1a3c464"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.683840 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-inventory" (OuterVolumeSpecName: "inventory") pod "6b52293d-9695-46ab-8248-af8bb1a3c464" (UID: "6b52293d-9695-46ab-8248-af8bb1a3c464"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.696101 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6b52293d-9695-46ab-8248-af8bb1a3c464" (UID: "6b52293d-9695-46ab-8248-af8bb1a3c464"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.748132 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.748169 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.748180 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qp86g\" (UniqueName: \"kubernetes.io/projected/6b52293d-9695-46ab-8248-af8bb1a3c464-kube-api-access-qp86g\") on node \"crc\" DevicePath \"\"" Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.748193 4938 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:18:24 crc kubenswrapper[4938]: I1122 11:18:24.748203 4938 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/6b52293d-9695-46ab-8248-af8bb1a3c464-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.122930 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" event={"ID":"6b52293d-9695-46ab-8248-af8bb1a3c464","Type":"ContainerDied","Data":"576183395fd82f5ddd22776efa62a83964bd2002487565ae81eba0fed8295e07"} Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.122971 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="576183395fd82f5ddd22776efa62a83964bd2002487565ae81eba0fed8295e07" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.122978 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-gcd75" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.213154 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5"] Nov 22 11:18:25 crc kubenswrapper[4938]: E1122 11:18:25.213664 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b52293d-9695-46ab-8248-af8bb1a3c464" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.213729 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b52293d-9695-46ab-8248-af8bb1a3c464" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 22 11:18:25 crc kubenswrapper[4938]: E1122 11:18:25.213784 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" containerName="extract-content" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.213870 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" containerName="extract-content" Nov 22 11:18:25 crc kubenswrapper[4938]: E1122 11:18:25.213962 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" containerName="registry-server" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.214034 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" containerName="registry-server" Nov 22 11:18:25 crc kubenswrapper[4938]: E1122 11:18:25.214106 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" containerName="extract-utilities" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.214185 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" containerName="extract-utilities" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.214421 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b52293d-9695-46ab-8248-af8bb1a3c464" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.214526 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5a0db44-c7d3-49b0-a6fa-0a2d0d0e82cc" containerName="registry-server" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.215222 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.217685 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.220649 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.220821 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.220874 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.221066 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.221211 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.222776 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.241102 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5"] Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.360541 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.360832 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.361109 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.361195 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.361366 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: 
\"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.361400 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sx4xk\" (UniqueName: \"kubernetes.io/projected/6b2c795f-d47e-411a-a1c0-f59ed58d9506-kube-api-access-sx4xk\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.361511 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.361765 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.361828 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.463478 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.463566 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.463593 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.463639 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.463658 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.463698 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.463719 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.463764 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.463782 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sx4xk\" (UniqueName: \"kubernetes.io/projected/6b2c795f-d47e-411a-a1c0-f59ed58d9506-kube-api-access-sx4xk\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.465200 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.468597 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.468638 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.470067 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.470591 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.470814 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.477168 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.477928 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.481026 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sx4xk\" (UniqueName: \"kubernetes.io/projected/6b2c795f-d47e-411a-a1c0-f59ed58d9506-kube-api-access-sx4xk\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sjbl5\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.537408 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:18:25 crc kubenswrapper[4938]: I1122 11:18:25.882338 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5"] Nov 22 11:18:26 crc kubenswrapper[4938]: I1122 11:18:26.131883 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" event={"ID":"6b2c795f-d47e-411a-a1c0-f59ed58d9506","Type":"ContainerStarted","Data":"d01a83bd98b54f11e6ce42e7110a27ad0f90a3e696128648ef4bb0283f0a8d6a"} Nov 22 11:18:27 crc kubenswrapper[4938]: I1122 11:18:27.140443 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" event={"ID":"6b2c795f-d47e-411a-a1c0-f59ed58d9506","Type":"ContainerStarted","Data":"ad289b231b95d4275e84ee28e7e5e006b85664a25752847caecf684c95882b09"} Nov 22 11:18:27 crc kubenswrapper[4938]: I1122 11:18:27.163175 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" podStartSLOduration=1.765987039 podStartE2EDuration="2.163155335s" podCreationTimestamp="2025-11-22 11:18:25 +0000 UTC" firstStartedPulling="2025-11-22 11:18:25.887543447 +0000 UTC m=+2438.355380846" lastFinishedPulling="2025-11-22 11:18:26.284711743 +0000 UTC m=+2438.752549142" observedRunningTime="2025-11-22 11:18:27.155110105 +0000 UTC m=+2439.622947514" watchObservedRunningTime="2025-11-22 11:18:27.163155335 +0000 UTC m=+2439.630992744" Nov 22 11:18:33 crc kubenswrapper[4938]: I1122 11:18:33.447765 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:18:33 crc kubenswrapper[4938]: E1122 11:18:33.448988 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:18:46 crc kubenswrapper[4938]: I1122 11:18:46.448140 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:18:46 crc kubenswrapper[4938]: E1122 11:18:46.448859 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:18:57 crc kubenswrapper[4938]: I1122 11:18:57.447822 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:18:57 crc kubenswrapper[4938]: E1122 11:18:57.448873 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" 
podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:19:10 crc kubenswrapper[4938]: I1122 11:19:10.448738 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:19:10 crc kubenswrapper[4938]: E1122 11:19:10.449740 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:19:23 crc kubenswrapper[4938]: I1122 11:19:23.448501 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:19:23 crc kubenswrapper[4938]: E1122 11:19:23.449271 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:19:37 crc kubenswrapper[4938]: I1122 11:19:37.447976 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:19:37 crc kubenswrapper[4938]: E1122 11:19:37.448688 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:19:49 crc kubenswrapper[4938]: I1122 11:19:49.447955 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:19:49 crc kubenswrapper[4938]: E1122 11:19:49.448609 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:20:01 crc kubenswrapper[4938]: I1122 11:20:01.447763 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:20:01 crc kubenswrapper[4938]: E1122 11:20:01.448637 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:20:12 crc kubenswrapper[4938]: I1122 11:20:12.447304 4938 scope.go:117] "RemoveContainer" 
containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:20:13 crc kubenswrapper[4938]: I1122 11:20:13.159224 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"79507d44c52adf7251572c57bd849ee03cedcfbbd8cc05038de0f58965cd2953"} Nov 22 11:21:11 crc kubenswrapper[4938]: I1122 11:21:11.720697 4938 generic.go:334] "Generic (PLEG): container finished" podID="6b2c795f-d47e-411a-a1c0-f59ed58d9506" containerID="ad289b231b95d4275e84ee28e7e5e006b85664a25752847caecf684c95882b09" exitCode=0 Nov 22 11:21:11 crc kubenswrapper[4938]: I1122 11:21:11.720826 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" event={"ID":"6b2c795f-d47e-411a-a1c0-f59ed58d9506","Type":"ContainerDied","Data":"ad289b231b95d4275e84ee28e7e5e006b85664a25752847caecf684c95882b09"} Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.120320 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.202090 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-migration-ssh-key-0\") pod \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.202147 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-ssh-key\") pod \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.202168 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-combined-ca-bundle\") pod \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.202190 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-cell1-compute-config-1\") pod \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.202223 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-inventory\") pod \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.202266 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-migration-ssh-key-1\") pod \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.202322 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sx4xk\" 
(UniqueName: \"kubernetes.io/projected/6b2c795f-d47e-411a-a1c0-f59ed58d9506-kube-api-access-sx4xk\") pod \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.202385 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-extra-config-0\") pod \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.202444 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-cell1-compute-config-0\") pod \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\" (UID: \"6b2c795f-d47e-411a-a1c0-f59ed58d9506\") " Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.208561 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "6b2c795f-d47e-411a-a1c0-f59ed58d9506" (UID: "6b2c795f-d47e-411a-a1c0-f59ed58d9506"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.209690 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b2c795f-d47e-411a-a1c0-f59ed58d9506-kube-api-access-sx4xk" (OuterVolumeSpecName: "kube-api-access-sx4xk") pod "6b2c795f-d47e-411a-a1c0-f59ed58d9506" (UID: "6b2c795f-d47e-411a-a1c0-f59ed58d9506"). InnerVolumeSpecName "kube-api-access-sx4xk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.231425 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "6b2c795f-d47e-411a-a1c0-f59ed58d9506" (UID: "6b2c795f-d47e-411a-a1c0-f59ed58d9506"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.231811 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "6b2c795f-d47e-411a-a1c0-f59ed58d9506" (UID: "6b2c795f-d47e-411a-a1c0-f59ed58d9506"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.233885 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "6b2c795f-d47e-411a-a1c0-f59ed58d9506" (UID: "6b2c795f-d47e-411a-a1c0-f59ed58d9506"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.239191 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6b2c795f-d47e-411a-a1c0-f59ed58d9506" (UID: "6b2c795f-d47e-411a-a1c0-f59ed58d9506"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.240696 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "6b2c795f-d47e-411a-a1c0-f59ed58d9506" (UID: "6b2c795f-d47e-411a-a1c0-f59ed58d9506"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.242276 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "6b2c795f-d47e-411a-a1c0-f59ed58d9506" (UID: "6b2c795f-d47e-411a-a1c0-f59ed58d9506"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.276337 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-inventory" (OuterVolumeSpecName: "inventory") pod "6b2c795f-d47e-411a-a1c0-f59ed58d9506" (UID: "6b2c795f-d47e-411a-a1c0-f59ed58d9506"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.305583 4938 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.305630 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.305645 4938 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.305657 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sx4xk\" (UniqueName: \"kubernetes.io/projected/6b2c795f-d47e-411a-a1c0-f59ed58d9506-kube-api-access-sx4xk\") on node \"crc\" DevicePath \"\"" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.305670 4938 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.305682 4938 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.305696 4938 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.305708 4938 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.305719 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6b2c795f-d47e-411a-a1c0-f59ed58d9506-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.737684 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" event={"ID":"6b2c795f-d47e-411a-a1c0-f59ed58d9506","Type":"ContainerDied","Data":"d01a83bd98b54f11e6ce42e7110a27ad0f90a3e696128648ef4bb0283f0a8d6a"} Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.737720 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d01a83bd98b54f11e6ce42e7110a27ad0f90a3e696128648ef4bb0283f0a8d6a" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.737755 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sjbl5" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.860440 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm"] Nov 22 11:21:13 crc kubenswrapper[4938]: E1122 11:21:13.860812 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b2c795f-d47e-411a-a1c0-f59ed58d9506" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.860832 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b2c795f-d47e-411a-a1c0-f59ed58d9506" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.861094 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b2c795f-d47e-411a-a1c0-f59ed58d9506" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.861702 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.863691 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.863926 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.863939 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.864082 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-wj8cc" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.864345 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:21:13 crc kubenswrapper[4938]: I1122 11:21:13.872548 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm"] Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.018770 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.018818 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.018889 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: 
\"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.018943 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.018992 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.019017 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p27nc\" (UniqueName: \"kubernetes.io/projected/979e4133-a50f-45d0-9eb3-7f684d65c4ce-kube-api-access-p27nc\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.019145 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.120986 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.121314 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.121360 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.121381 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p27nc\" (UniqueName: 
\"kubernetes.io/projected/979e4133-a50f-45d0-9eb3-7f684d65c4ce-kube-api-access-p27nc\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.121483 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.121518 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.121538 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.125441 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.125787 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.125819 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.126019 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.127310 4938 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.127987 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.144792 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p27nc\" (UniqueName: \"kubernetes.io/projected/979e4133-a50f-45d0-9eb3-7f684d65c4ce-kube-api-access-p27nc\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.188501 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.721717 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm"] Nov 22 11:21:14 crc kubenswrapper[4938]: I1122 11:21:14.750604 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" event={"ID":"979e4133-a50f-45d0-9eb3-7f684d65c4ce","Type":"ContainerStarted","Data":"fa32ccdf072214bd7319977338083a6650afe690c2ae65324fed5be716bdffb8"} Nov 22 11:21:15 crc kubenswrapper[4938]: I1122 11:21:15.761180 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" event={"ID":"979e4133-a50f-45d0-9eb3-7f684d65c4ce","Type":"ContainerStarted","Data":"34bafdd51a6fe1764b768916a984449ebdcd84709254b9e0f7fe9583f4bc2905"} Nov 22 11:22:41 crc kubenswrapper[4938]: I1122 11:22:41.301151 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:22:41 crc kubenswrapper[4938]: I1122 11:22:41.301601 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:23:11 crc kubenswrapper[4938]: I1122 11:23:11.300513 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:23:11 crc kubenswrapper[4938]: I1122 11:23:11.301048 4938 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:23:38 crc kubenswrapper[4938]: I1122 11:23:38.168759 4938 generic.go:334] "Generic (PLEG): container finished" podID="979e4133-a50f-45d0-9eb3-7f684d65c4ce" containerID="34bafdd51a6fe1764b768916a984449ebdcd84709254b9e0f7fe9583f4bc2905" exitCode=0 Nov 22 11:23:38 crc kubenswrapper[4938]: I1122 11:23:38.169346 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" event={"ID":"979e4133-a50f-45d0-9eb3-7f684d65c4ce","Type":"ContainerDied","Data":"34bafdd51a6fe1764b768916a984449ebdcd84709254b9e0f7fe9583f4bc2905"} Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.711523 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.883664 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-2\") pod \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.884051 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-telemetry-combined-ca-bundle\") pod \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.884081 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p27nc\" (UniqueName: \"kubernetes.io/projected/979e4133-a50f-45d0-9eb3-7f684d65c4ce-kube-api-access-p27nc\") pod \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.884100 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-inventory\") pod \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.884172 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-1\") pod \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.884233 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ssh-key\") pod \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.884259 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: 
\"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-0\") pod \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\" (UID: \"979e4133-a50f-45d0-9eb3-7f684d65c4ce\") " Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.889925 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "979e4133-a50f-45d0-9eb3-7f684d65c4ce" (UID: "979e4133-a50f-45d0-9eb3-7f684d65c4ce"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.890553 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/979e4133-a50f-45d0-9eb3-7f684d65c4ce-kube-api-access-p27nc" (OuterVolumeSpecName: "kube-api-access-p27nc") pod "979e4133-a50f-45d0-9eb3-7f684d65c4ce" (UID: "979e4133-a50f-45d0-9eb3-7f684d65c4ce"). InnerVolumeSpecName "kube-api-access-p27nc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.912282 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "979e4133-a50f-45d0-9eb3-7f684d65c4ce" (UID: "979e4133-a50f-45d0-9eb3-7f684d65c4ce"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.912331 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "979e4133-a50f-45d0-9eb3-7f684d65c4ce" (UID: "979e4133-a50f-45d0-9eb3-7f684d65c4ce"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.913392 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-inventory" (OuterVolumeSpecName: "inventory") pod "979e4133-a50f-45d0-9eb3-7f684d65c4ce" (UID: "979e4133-a50f-45d0-9eb3-7f684d65c4ce"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.925087 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "979e4133-a50f-45d0-9eb3-7f684d65c4ce" (UID: "979e4133-a50f-45d0-9eb3-7f684d65c4ce"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.931822 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "979e4133-a50f-45d0-9eb3-7f684d65c4ce" (UID: "979e4133-a50f-45d0-9eb3-7f684d65c4ce"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.985991 4938 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.986025 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p27nc\" (UniqueName: \"kubernetes.io/projected/979e4133-a50f-45d0-9eb3-7f684d65c4ce-kube-api-access-p27nc\") on node \"crc\" DevicePath \"\"" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.986035 4938 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.986044 4938 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.986056 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.986066 4938 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:23:39 crc kubenswrapper[4938]: I1122 11:23:39.986076 4938 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/979e4133-a50f-45d0-9eb3-7f684d65c4ce-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 22 11:23:40 crc kubenswrapper[4938]: I1122 11:23:40.190312 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" event={"ID":"979e4133-a50f-45d0-9eb3-7f684d65c4ce","Type":"ContainerDied","Data":"fa32ccdf072214bd7319977338083a6650afe690c2ae65324fed5be716bdffb8"} Nov 22 11:23:40 crc kubenswrapper[4938]: I1122 11:23:40.190350 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa32ccdf072214bd7319977338083a6650afe690c2ae65324fed5be716bdffb8" Nov 22 11:23:40 crc kubenswrapper[4938]: I1122 11:23:40.190383 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm" Nov 22 11:23:41 crc kubenswrapper[4938]: I1122 11:23:41.300861 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:23:41 crc kubenswrapper[4938]: I1122 11:23:41.301219 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:23:41 crc kubenswrapper[4938]: I1122 11:23:41.301267 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 11:23:41 crc kubenswrapper[4938]: I1122 11:23:41.302034 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"79507d44c52adf7251572c57bd849ee03cedcfbbd8cc05038de0f58965cd2953"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:23:41 crc kubenswrapper[4938]: I1122 11:23:41.302109 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://79507d44c52adf7251572c57bd849ee03cedcfbbd8cc05038de0f58965cd2953" gracePeriod=600 Nov 22 11:23:42 crc kubenswrapper[4938]: I1122 11:23:42.208672 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="79507d44c52adf7251572c57bd849ee03cedcfbbd8cc05038de0f58965cd2953" exitCode=0 Nov 22 11:23:42 crc kubenswrapper[4938]: I1122 11:23:42.208763 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"79507d44c52adf7251572c57bd849ee03cedcfbbd8cc05038de0f58965cd2953"} Nov 22 11:23:42 crc kubenswrapper[4938]: I1122 11:23:42.209160 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"} Nov 22 11:23:42 crc kubenswrapper[4938]: I1122 11:23:42.209184 4938 scope.go:117] "RemoveContainer" containerID="c79cf607060e29358b4f37e0705b41ae076f66319a88df5cfa1b3cd4c6e7d8f1" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.576036 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7wskg"] Nov 22 11:24:07 crc kubenswrapper[4938]: E1122 11:24:07.577014 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="979e4133-a50f-45d0-9eb3-7f684d65c4ce" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.577035 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="979e4133-a50f-45d0-9eb3-7f684d65c4ce" 
containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.577290 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="979e4133-a50f-45d0-9eb3-7f684d65c4ce" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.589347 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7wskg"] Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.589463 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.716749 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f55054fc-bd82-4e15-9a97-39e2d5f033e7-catalog-content\") pod \"redhat-operators-7wskg\" (UID: \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\") " pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.716859 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f55054fc-bd82-4e15-9a97-39e2d5f033e7-utilities\") pod \"redhat-operators-7wskg\" (UID: \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\") " pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.717049 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fjgp\" (UniqueName: \"kubernetes.io/projected/f55054fc-bd82-4e15-9a97-39e2d5f033e7-kube-api-access-7fjgp\") pod \"redhat-operators-7wskg\" (UID: \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\") " pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.819422 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f55054fc-bd82-4e15-9a97-39e2d5f033e7-catalog-content\") pod \"redhat-operators-7wskg\" (UID: \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\") " pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.819546 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f55054fc-bd82-4e15-9a97-39e2d5f033e7-utilities\") pod \"redhat-operators-7wskg\" (UID: \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\") " pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.819624 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fjgp\" (UniqueName: \"kubernetes.io/projected/f55054fc-bd82-4e15-9a97-39e2d5f033e7-kube-api-access-7fjgp\") pod \"redhat-operators-7wskg\" (UID: \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\") " pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.820389 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f55054fc-bd82-4e15-9a97-39e2d5f033e7-catalog-content\") pod \"redhat-operators-7wskg\" (UID: \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\") " pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.820466 4938 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f55054fc-bd82-4e15-9a97-39e2d5f033e7-utilities\") pod \"redhat-operators-7wskg\" (UID: \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\") " pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.840394 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fjgp\" (UniqueName: \"kubernetes.io/projected/f55054fc-bd82-4e15-9a97-39e2d5f033e7-kube-api-access-7fjgp\") pod \"redhat-operators-7wskg\" (UID: \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\") " pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:07 crc kubenswrapper[4938]: I1122 11:24:07.923895 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:08 crc kubenswrapper[4938]: I1122 11:24:08.378780 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7wskg"] Nov 22 11:24:08 crc kubenswrapper[4938]: I1122 11:24:08.466139 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wskg" event={"ID":"f55054fc-bd82-4e15-9a97-39e2d5f033e7","Type":"ContainerStarted","Data":"b8d3f28aa9bbcb9f3edbd54efcde43f8f789c97629a57eeb8ac01f4474ecfe8c"} Nov 22 11:24:09 crc kubenswrapper[4938]: I1122 11:24:09.476804 4938 generic.go:334] "Generic (PLEG): container finished" podID="f55054fc-bd82-4e15-9a97-39e2d5f033e7" containerID="42c862c778c0705f606809ca1a4e31d1b812585c5ce4349dda04cf36fe531a7d" exitCode=0 Nov 22 11:24:09 crc kubenswrapper[4938]: I1122 11:24:09.476862 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wskg" event={"ID":"f55054fc-bd82-4e15-9a97-39e2d5f033e7","Type":"ContainerDied","Data":"42c862c778c0705f606809ca1a4e31d1b812585c5ce4349dda04cf36fe531a7d"} Nov 22 11:24:09 crc kubenswrapper[4938]: I1122 11:24:09.480246 4938 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:24:10 crc kubenswrapper[4938]: I1122 11:24:10.489111 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wskg" event={"ID":"f55054fc-bd82-4e15-9a97-39e2d5f033e7","Type":"ContainerStarted","Data":"ef7cfc9199cdf882e2e5a92eed11482ec23622f6e42e10c48489bd574e9f14cf"} Nov 22 11:24:11 crc kubenswrapper[4938]: I1122 11:24:11.499484 4938 generic.go:334] "Generic (PLEG): container finished" podID="f55054fc-bd82-4e15-9a97-39e2d5f033e7" containerID="ef7cfc9199cdf882e2e5a92eed11482ec23622f6e42e10c48489bd574e9f14cf" exitCode=0 Nov 22 11:24:11 crc kubenswrapper[4938]: I1122 11:24:11.499575 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wskg" event={"ID":"f55054fc-bd82-4e15-9a97-39e2d5f033e7","Type":"ContainerDied","Data":"ef7cfc9199cdf882e2e5a92eed11482ec23622f6e42e10c48489bd574e9f14cf"} Nov 22 11:24:12 crc kubenswrapper[4938]: I1122 11:24:12.511873 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wskg" event={"ID":"f55054fc-bd82-4e15-9a97-39e2d5f033e7","Type":"ContainerStarted","Data":"ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484"} Nov 22 11:24:12 crc kubenswrapper[4938]: I1122 11:24:12.535118 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7wskg" podStartSLOduration=3.1114175570000002 
podStartE2EDuration="5.535098691s" podCreationTimestamp="2025-11-22 11:24:07 +0000 UTC" firstStartedPulling="2025-11-22 11:24:09.478901272 +0000 UTC m=+2781.946738671" lastFinishedPulling="2025-11-22 11:24:11.902582396 +0000 UTC m=+2784.370419805" observedRunningTime="2025-11-22 11:24:12.528899546 +0000 UTC m=+2784.996736975" watchObservedRunningTime="2025-11-22 11:24:12.535098691 +0000 UTC m=+2785.002936090" Nov 22 11:24:17 crc kubenswrapper[4938]: I1122 11:24:17.923991 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:17 crc kubenswrapper[4938]: I1122 11:24:17.924550 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:17 crc kubenswrapper[4938]: I1122 11:24:17.964166 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:18 crc kubenswrapper[4938]: I1122 11:24:18.600009 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:18 crc kubenswrapper[4938]: I1122 11:24:18.649769 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7wskg"] Nov 22 11:24:20 crc kubenswrapper[4938]: I1122 11:24:20.582165 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7wskg" podUID="f55054fc-bd82-4e15-9a97-39e2d5f033e7" containerName="registry-server" containerID="cri-o://ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484" gracePeriod=2 Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.567567 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.593712 4938 generic.go:334] "Generic (PLEG): container finished" podID="f55054fc-bd82-4e15-9a97-39e2d5f033e7" containerID="ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484" exitCode=0 Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.593752 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wskg" event={"ID":"f55054fc-bd82-4e15-9a97-39e2d5f033e7","Type":"ContainerDied","Data":"ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484"} Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.593777 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wskg" event={"ID":"f55054fc-bd82-4e15-9a97-39e2d5f033e7","Type":"ContainerDied","Data":"b8d3f28aa9bbcb9f3edbd54efcde43f8f789c97629a57eeb8ac01f4474ecfe8c"} Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.593793 4938 scope.go:117] "RemoveContainer" containerID="ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.594288 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7wskg" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.624851 4938 scope.go:117] "RemoveContainer" containerID="ef7cfc9199cdf882e2e5a92eed11482ec23622f6e42e10c48489bd574e9f14cf" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.624985 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fjgp\" (UniqueName: \"kubernetes.io/projected/f55054fc-bd82-4e15-9a97-39e2d5f033e7-kube-api-access-7fjgp\") pod \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\" (UID: \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\") " Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.625055 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f55054fc-bd82-4e15-9a97-39e2d5f033e7-catalog-content\") pod \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\" (UID: \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\") " Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.625184 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f55054fc-bd82-4e15-9a97-39e2d5f033e7-utilities\") pod \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\" (UID: \"f55054fc-bd82-4e15-9a97-39e2d5f033e7\") " Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.626220 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f55054fc-bd82-4e15-9a97-39e2d5f033e7-utilities" (OuterVolumeSpecName: "utilities") pod "f55054fc-bd82-4e15-9a97-39e2d5f033e7" (UID: "f55054fc-bd82-4e15-9a97-39e2d5f033e7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.641227 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f55054fc-bd82-4e15-9a97-39e2d5f033e7-kube-api-access-7fjgp" (OuterVolumeSpecName: "kube-api-access-7fjgp") pod "f55054fc-bd82-4e15-9a97-39e2d5f033e7" (UID: "f55054fc-bd82-4e15-9a97-39e2d5f033e7"). InnerVolumeSpecName "kube-api-access-7fjgp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.655759 4938 scope.go:117] "RemoveContainer" containerID="42c862c778c0705f606809ca1a4e31d1b812585c5ce4349dda04cf36fe531a7d" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.718627 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f55054fc-bd82-4e15-9a97-39e2d5f033e7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f55054fc-bd82-4e15-9a97-39e2d5f033e7" (UID: "f55054fc-bd82-4e15-9a97-39e2d5f033e7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.721191 4938 scope.go:117] "RemoveContainer" containerID="ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484" Nov 22 11:24:21 crc kubenswrapper[4938]: E1122 11:24:21.721974 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484\": container with ID starting with ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484 not found: ID does not exist" containerID="ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.722033 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484"} err="failed to get container status \"ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484\": rpc error: code = NotFound desc = could not find container \"ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484\": container with ID starting with ed44b20198f26b2750780ba7deb707d9be09576a5f525ce7fa45845ab00ca484 not found: ID does not exist" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.722054 4938 scope.go:117] "RemoveContainer" containerID="ef7cfc9199cdf882e2e5a92eed11482ec23622f6e42e10c48489bd574e9f14cf" Nov 22 11:24:21 crc kubenswrapper[4938]: E1122 11:24:21.722492 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef7cfc9199cdf882e2e5a92eed11482ec23622f6e42e10c48489bd574e9f14cf\": container with ID starting with ef7cfc9199cdf882e2e5a92eed11482ec23622f6e42e10c48489bd574e9f14cf not found: ID does not exist" containerID="ef7cfc9199cdf882e2e5a92eed11482ec23622f6e42e10c48489bd574e9f14cf" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.722533 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef7cfc9199cdf882e2e5a92eed11482ec23622f6e42e10c48489bd574e9f14cf"} err="failed to get container status \"ef7cfc9199cdf882e2e5a92eed11482ec23622f6e42e10c48489bd574e9f14cf\": rpc error: code = NotFound desc = could not find container \"ef7cfc9199cdf882e2e5a92eed11482ec23622f6e42e10c48489bd574e9f14cf\": container with ID starting with ef7cfc9199cdf882e2e5a92eed11482ec23622f6e42e10c48489bd574e9f14cf not found: ID does not exist" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.722559 4938 scope.go:117] "RemoveContainer" containerID="42c862c778c0705f606809ca1a4e31d1b812585c5ce4349dda04cf36fe531a7d" Nov 22 11:24:21 crc kubenswrapper[4938]: E1122 11:24:21.722904 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42c862c778c0705f606809ca1a4e31d1b812585c5ce4349dda04cf36fe531a7d\": container with ID starting with 42c862c778c0705f606809ca1a4e31d1b812585c5ce4349dda04cf36fe531a7d not found: ID does not exist" containerID="42c862c778c0705f606809ca1a4e31d1b812585c5ce4349dda04cf36fe531a7d" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.723116 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42c862c778c0705f606809ca1a4e31d1b812585c5ce4349dda04cf36fe531a7d"} err="failed to get container status \"42c862c778c0705f606809ca1a4e31d1b812585c5ce4349dda04cf36fe531a7d\": rpc error: code = NotFound desc = could not 
find container \"42c862c778c0705f606809ca1a4e31d1b812585c5ce4349dda04cf36fe531a7d\": container with ID starting with 42c862c778c0705f606809ca1a4e31d1b812585c5ce4349dda04cf36fe531a7d not found: ID does not exist" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.726534 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fjgp\" (UniqueName: \"kubernetes.io/projected/f55054fc-bd82-4e15-9a97-39e2d5f033e7-kube-api-access-7fjgp\") on node \"crc\" DevicePath \"\"" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.726561 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f55054fc-bd82-4e15-9a97-39e2d5f033e7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.726573 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f55054fc-bd82-4e15-9a97-39e2d5f033e7-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.929535 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7wskg"] Nov 22 11:24:21 crc kubenswrapper[4938]: I1122 11:24:21.937580 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7wskg"] Nov 22 11:24:22 crc kubenswrapper[4938]: I1122 11:24:22.459068 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f55054fc-bd82-4e15-9a97-39e2d5f033e7" path="/var/lib/kubelet/pods/f55054fc-bd82-4e15-9a97-39e2d5f033e7/volumes" Nov 22 11:24:28 crc kubenswrapper[4938]: E1122 11:24:28.818081 4938 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.182:47942->38.102.83.182:45775: write tcp 38.102.83.182:47942->38.102.83.182:45775: write: broken pipe Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.843065 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 22 11:24:39 crc kubenswrapper[4938]: E1122 11:24:39.844250 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f55054fc-bd82-4e15-9a97-39e2d5f033e7" containerName="extract-content" Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.844268 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f55054fc-bd82-4e15-9a97-39e2d5f033e7" containerName="extract-content" Nov 22 11:24:39 crc kubenswrapper[4938]: E1122 11:24:39.844303 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f55054fc-bd82-4e15-9a97-39e2d5f033e7" containerName="extract-utilities" Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.844310 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f55054fc-bd82-4e15-9a97-39e2d5f033e7" containerName="extract-utilities" Nov 22 11:24:39 crc kubenswrapper[4938]: E1122 11:24:39.844328 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f55054fc-bd82-4e15-9a97-39e2d5f033e7" containerName="registry-server" Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.844335 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="f55054fc-bd82-4e15-9a97-39e2d5f033e7" containerName="registry-server" Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.844558 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="f55054fc-bd82-4e15-9a97-39e2d5f033e7" containerName="registry-server" Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.845567 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.848975 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.849287 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-ldj5t"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.849923 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.850074 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.853425 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.979113 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.979372 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7624b768-90d5-4bad-b97e-21ea6549679a-config-data\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.979518 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.979619 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.979701 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.980461 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7624b768-90d5-4bad-b97e-21ea6549679a-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.980590 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjm67\" (UniqueName: \"kubernetes.io/projected/7624b768-90d5-4bad-b97e-21ea6549679a-kube-api-access-sjm67\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.980644 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7624b768-90d5-4bad-b97e-21ea6549679a-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:39 crc kubenswrapper[4938]: I1122 11:24:39.980686 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7624b768-90d5-4bad-b97e-21ea6549679a-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.082779 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7624b768-90d5-4bad-b97e-21ea6549679a-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.082901 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjm67\" (UniqueName: \"kubernetes.io/projected/7624b768-90d5-4bad-b97e-21ea6549679a-kube-api-access-sjm67\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.082981 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7624b768-90d5-4bad-b97e-21ea6549679a-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.083027 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7624b768-90d5-4bad-b97e-21ea6549679a-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.083170 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.083214 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7624b768-90d5-4bad-b97e-21ea6549679a-config-data\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.083272 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.083306 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.083336 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.084020 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7624b768-90d5-4bad-b97e-21ea6549679a-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.084020 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.084205 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7624b768-90d5-4bad-b97e-21ea6549679a-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.085392 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7624b768-90d5-4bad-b97e-21ea6549679a-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.085703 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7624b768-90d5-4bad-b97e-21ea6549679a-config-data\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.089900 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.090339 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.097873 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.102934 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjm67\" (UniqueName: \"kubernetes.io/projected/7624b768-90d5-4bad-b97e-21ea6549679a-kube-api-access-sjm67\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.118026 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") " pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.169797 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.587977 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Nov 22 11:24:40 crc kubenswrapper[4938]: I1122 11:24:40.760941 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"7624b768-90d5-4bad-b97e-21ea6549679a","Type":"ContainerStarted","Data":"e9060d9d5dc34ae9aced52b8f8ffd3ba58b4566540a6a6666fe94fb9681a24ea"}
Nov 22 11:24:43 crc kubenswrapper[4938]: I1122 11:24:43.917242 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zpq7z"]
Nov 22 11:24:43 crc kubenswrapper[4938]: I1122 11:24:43.919543 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:24:43 crc kubenswrapper[4938]: I1122 11:24:43.927475 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zpq7z"]
Nov 22 11:24:43 crc kubenswrapper[4938]: I1122 11:24:43.973731 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/390f208b-4787-424f-a2ba-536763ac847d-utilities\") pod \"redhat-marketplace-zpq7z\" (UID: \"390f208b-4787-424f-a2ba-536763ac847d\") " pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:24:43 crc kubenswrapper[4938]: I1122 11:24:43.973780 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/390f208b-4787-424f-a2ba-536763ac847d-catalog-content\") pod \"redhat-marketplace-zpq7z\" (UID: \"390f208b-4787-424f-a2ba-536763ac847d\") " pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:24:43 crc kubenswrapper[4938]: I1122 11:24:43.973810 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkwkr\" (UniqueName: \"kubernetes.io/projected/390f208b-4787-424f-a2ba-536763ac847d-kube-api-access-kkwkr\") pod \"redhat-marketplace-zpq7z\" (UID: \"390f208b-4787-424f-a2ba-536763ac847d\") " pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:24:44 crc kubenswrapper[4938]: I1122 11:24:44.076284 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/390f208b-4787-424f-a2ba-536763ac847d-utilities\") pod \"redhat-marketplace-zpq7z\" (UID: \"390f208b-4787-424f-a2ba-536763ac847d\") " pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:24:44 crc kubenswrapper[4938]: I1122 11:24:44.076371 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/390f208b-4787-424f-a2ba-536763ac847d-catalog-content\") pod \"redhat-marketplace-zpq7z\" (UID: \"390f208b-4787-424f-a2ba-536763ac847d\") " pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:24:44 crc kubenswrapper[4938]: I1122 11:24:44.076405 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkwkr\" (UniqueName: \"kubernetes.io/projected/390f208b-4787-424f-a2ba-536763ac847d-kube-api-access-kkwkr\") pod \"redhat-marketplace-zpq7z\" (UID: \"390f208b-4787-424f-a2ba-536763ac847d\") " pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:24:44 crc kubenswrapper[4938]: I1122 11:24:44.076384 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/390f208b-4787-424f-a2ba-536763ac847d-utilities\") pod \"redhat-marketplace-zpq7z\" (UID: \"390f208b-4787-424f-a2ba-536763ac847d\") " pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:24:44 crc kubenswrapper[4938]: I1122 11:24:44.076816 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/390f208b-4787-424f-a2ba-536763ac847d-catalog-content\") pod \"redhat-marketplace-zpq7z\" (UID: \"390f208b-4787-424f-a2ba-536763ac847d\") " pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:24:44 crc kubenswrapper[4938]: I1122 11:24:44.099673 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkwkr\" (UniqueName: \"kubernetes.io/projected/390f208b-4787-424f-a2ba-536763ac847d-kube-api-access-kkwkr\") pod \"redhat-marketplace-zpq7z\" (UID: \"390f208b-4787-424f-a2ba-536763ac847d\") " pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:24:44 crc kubenswrapper[4938]: I1122 11:24:44.293590 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:24:46 crc kubenswrapper[4938]: I1122 11:24:46.272601 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zpq7z"]
Nov 22 11:24:46 crc kubenswrapper[4938]: I1122 11:24:46.817974 4938 generic.go:334] "Generic (PLEG): container finished" podID="390f208b-4787-424f-a2ba-536763ac847d" containerID="e17a71719b6937a9723fc267c3e193d7cbdc1d17a8343348d792e58604d096a1" exitCode=0
Nov 22 11:24:46 crc kubenswrapper[4938]: I1122 11:24:46.818385 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zpq7z" event={"ID":"390f208b-4787-424f-a2ba-536763ac847d","Type":"ContainerDied","Data":"e17a71719b6937a9723fc267c3e193d7cbdc1d17a8343348d792e58604d096a1"}
Nov 22 11:24:46 crc kubenswrapper[4938]: I1122 11:24:46.818420 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zpq7z" event={"ID":"390f208b-4787-424f-a2ba-536763ac847d","Type":"ContainerStarted","Data":"8681d7cdf5c9cc04e61385211610d4bbfde83f92529902cbc8c5bf3a6f885df5"}
Nov 22 11:24:47 crc kubenswrapper[4938]: I1122 11:24:47.829566 4938 generic.go:334] "Generic (PLEG): container finished" podID="390f208b-4787-424f-a2ba-536763ac847d" containerID="ba17a7f8cda46eefefcc791391bd30b4241655c9af907035d67ee384e58850b9" exitCode=0
Nov 22 11:24:47 crc kubenswrapper[4938]: I1122 11:24:47.829651 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zpq7z" event={"ID":"390f208b-4787-424f-a2ba-536763ac847d","Type":"ContainerDied","Data":"ba17a7f8cda46eefefcc791391bd30b4241655c9af907035d67ee384e58850b9"}
Nov 22 11:25:19 crc kubenswrapper[4938]: E1122 11:25:19.576247 4938 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified"
Nov 22 11:25:19 crc kubenswrapper[4938]: E1122 11:25:19.577137 4938 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sjm67,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(7624b768-90d5-4bad-b97e-21ea6549679a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 22 11:25:19 crc kubenswrapper[4938]: E1122 11:25:19.578459 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="7624b768-90d5-4bad-b97e-21ea6549679a"
Nov 22 11:25:20 crc kubenswrapper[4938]: I1122 11:25:20.177010 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zpq7z" event={"ID":"390f208b-4787-424f-a2ba-536763ac847d","Type":"ContainerStarted","Data":"17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b"}
Nov 22 11:25:20 crc kubenswrapper[4938]: E1122 11:25:20.180510 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="7624b768-90d5-4bad-b97e-21ea6549679a"
Nov 22 11:25:20 crc kubenswrapper[4938]: I1122 11:25:20.215074 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zpq7z" podStartSLOduration=31.819030398 podStartE2EDuration="37.215055355s" podCreationTimestamp="2025-11-22 11:24:43 +0000 UTC" firstStartedPulling="2025-11-22 11:24:46.820320848 +0000 UTC m=+2819.288158247" lastFinishedPulling="2025-11-22 11:24:52.216345805 +0000 UTC m=+2824.684183204" observedRunningTime="2025-11-22 11:25:20.214653525 +0000 UTC m=+2852.682490944" watchObservedRunningTime="2025-11-22 11:25:20.215055355 +0000 UTC m=+2852.682892754"
Nov 22 11:25:24 crc kubenswrapper[4938]: I1122 11:25:24.294356 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:25:24 crc kubenswrapper[4938]: I1122 11:25:24.294730 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:25:25 crc kubenswrapper[4938]: I1122 11:25:25.344339 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-zpq7z" podUID="390f208b-4787-424f-a2ba-536763ac847d" containerName="registry-server" probeResult="failure" output=<
Nov 22 11:25:25 crc kubenswrapper[4938]: timeout: failed to connect service ":50051" within 1s
Nov 22 11:25:25 crc kubenswrapper[4938]: >
Nov 22 11:25:34 crc kubenswrapper[4938]: I1122 11:25:34.005033 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Nov 22 11:25:34 crc kubenswrapper[4938]: I1122 11:25:34.356429 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:25:34 crc kubenswrapper[4938]: I1122 11:25:34.406677 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:25:34 crc kubenswrapper[4938]: I1122 11:25:34.596632 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zpq7z"]
Nov 22 11:25:35 crc kubenswrapper[4938]: I1122 11:25:35.330671 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"7624b768-90d5-4bad-b97e-21ea6549679a","Type":"ContainerStarted","Data":"7e5553a7a9d309ab8a47ea838f9575be2432066c85fe9157f462061037c75f8f"}
Nov 22 11:25:35 crc kubenswrapper[4938]: I1122 11:25:35.350292 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.9362875170000002 podStartE2EDuration="57.350258333s" podCreationTimestamp="2025-11-22 11:24:38 +0000 UTC" firstStartedPulling="2025-11-22 11:24:40.587532387 +0000 UTC m=+2813.055369786" lastFinishedPulling="2025-11-22 11:25:34.001503203 +0000 UTC m=+2866.469340602" observedRunningTime="2025-11-22 11:25:35.348192672 +0000 UTC m=+2867.816030071" watchObservedRunningTime="2025-11-22 11:25:35.350258333 +0000 UTC m=+2867.818095732"
Nov 22 11:25:36 crc kubenswrapper[4938]: I1122 11:25:36.345017 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zpq7z" podUID="390f208b-4787-424f-a2ba-536763ac847d" containerName="registry-server" containerID="cri-o://17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b" gracePeriod=2
Nov 22 11:25:36 crc kubenswrapper[4938]: I1122 11:25:36.820402 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:25:36 crc kubenswrapper[4938]: I1122 11:25:36.966422 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kkwkr\" (UniqueName: \"kubernetes.io/projected/390f208b-4787-424f-a2ba-536763ac847d-kube-api-access-kkwkr\") pod \"390f208b-4787-424f-a2ba-536763ac847d\" (UID: \"390f208b-4787-424f-a2ba-536763ac847d\") "
Nov 22 11:25:36 crc kubenswrapper[4938]: I1122 11:25:36.966488 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/390f208b-4787-424f-a2ba-536763ac847d-catalog-content\") pod \"390f208b-4787-424f-a2ba-536763ac847d\" (UID: \"390f208b-4787-424f-a2ba-536763ac847d\") "
Nov 22 11:25:36 crc kubenswrapper[4938]: I1122 11:25:36.966611 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/390f208b-4787-424f-a2ba-536763ac847d-utilities\") pod \"390f208b-4787-424f-a2ba-536763ac847d\" (UID: \"390f208b-4787-424f-a2ba-536763ac847d\") "
Nov 22 11:25:36 crc kubenswrapper[4938]: I1122 11:25:36.967396 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/390f208b-4787-424f-a2ba-536763ac847d-utilities" (OuterVolumeSpecName: "utilities") pod "390f208b-4787-424f-a2ba-536763ac847d" (UID: "390f208b-4787-424f-a2ba-536763ac847d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:25:36 crc kubenswrapper[4938]: I1122 11:25:36.972364 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/390f208b-4787-424f-a2ba-536763ac847d-kube-api-access-kkwkr" (OuterVolumeSpecName: "kube-api-access-kkwkr") pod "390f208b-4787-424f-a2ba-536763ac847d" (UID: "390f208b-4787-424f-a2ba-536763ac847d"). InnerVolumeSpecName "kube-api-access-kkwkr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:25:36 crc kubenswrapper[4938]: I1122 11:25:36.985775 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/390f208b-4787-424f-a2ba-536763ac847d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "390f208b-4787-424f-a2ba-536763ac847d" (UID: "390f208b-4787-424f-a2ba-536763ac847d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.068183 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/390f208b-4787-424f-a2ba-536763ac847d-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.068418 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kkwkr\" (UniqueName: \"kubernetes.io/projected/390f208b-4787-424f-a2ba-536763ac847d-kube-api-access-kkwkr\") on node \"crc\" DevicePath \"\""
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.068429 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/390f208b-4787-424f-a2ba-536763ac847d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.358713 4938 generic.go:334] "Generic (PLEG): container finished" podID="390f208b-4787-424f-a2ba-536763ac847d" containerID="17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b" exitCode=0
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.358768 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zpq7z" event={"ID":"390f208b-4787-424f-a2ba-536763ac847d","Type":"ContainerDied","Data":"17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b"}
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.358848 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zpq7z" event={"ID":"390f208b-4787-424f-a2ba-536763ac847d","Type":"ContainerDied","Data":"8681d7cdf5c9cc04e61385211610d4bbfde83f92529902cbc8c5bf3a6f885df5"}
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.358868 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zpq7z"
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.358883 4938 scope.go:117] "RemoveContainer" containerID="17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b"
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.388025 4938 scope.go:117] "RemoveContainer" containerID="ba17a7f8cda46eefefcc791391bd30b4241655c9af907035d67ee384e58850b9"
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.422390 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zpq7z"]
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.427577 4938 scope.go:117] "RemoveContainer" containerID="e17a71719b6937a9723fc267c3e193d7cbdc1d17a8343348d792e58604d096a1"
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.427745 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zpq7z"]
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.468346 4938 scope.go:117] "RemoveContainer" containerID="17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b"
Nov 22 11:25:37 crc kubenswrapper[4938]: E1122 11:25:37.468758 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b\": container with ID starting with 17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b not found: ID does not exist" containerID="17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b"
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.468887 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b"} err="failed to get container status \"17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b\": rpc error: code = NotFound desc = could not find container \"17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b\": container with ID starting with 17307460a1dab59e989c39ea11a06b590fd1257a0736b2dbba477044d71c257b not found: ID does not exist"
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.469047 4938 scope.go:117] "RemoveContainer" containerID="ba17a7f8cda46eefefcc791391bd30b4241655c9af907035d67ee384e58850b9"
Nov 22 11:25:37 crc kubenswrapper[4938]: E1122 11:25:37.469562 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba17a7f8cda46eefefcc791391bd30b4241655c9af907035d67ee384e58850b9\": container with ID starting with ba17a7f8cda46eefefcc791391bd30b4241655c9af907035d67ee384e58850b9 not found: ID does not exist" containerID="ba17a7f8cda46eefefcc791391bd30b4241655c9af907035d67ee384e58850b9"
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.469589 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba17a7f8cda46eefefcc791391bd30b4241655c9af907035d67ee384e58850b9"} err="failed to get container status \"ba17a7f8cda46eefefcc791391bd30b4241655c9af907035d67ee384e58850b9\": rpc error: code = NotFound desc = could not find container \"ba17a7f8cda46eefefcc791391bd30b4241655c9af907035d67ee384e58850b9\": container with ID starting with ba17a7f8cda46eefefcc791391bd30b4241655c9af907035d67ee384e58850b9 not found: ID does not exist"
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.469604 4938 scope.go:117] "RemoveContainer" containerID="e17a71719b6937a9723fc267c3e193d7cbdc1d17a8343348d792e58604d096a1"
Nov 22 11:25:37 crc kubenswrapper[4938]: E1122 11:25:37.469937 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e17a71719b6937a9723fc267c3e193d7cbdc1d17a8343348d792e58604d096a1\": container with ID starting with e17a71719b6937a9723fc267c3e193d7cbdc1d17a8343348d792e58604d096a1 not found: ID does not exist" containerID="e17a71719b6937a9723fc267c3e193d7cbdc1d17a8343348d792e58604d096a1"
Nov 22 11:25:37 crc kubenswrapper[4938]: I1122 11:25:37.470055 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e17a71719b6937a9723fc267c3e193d7cbdc1d17a8343348d792e58604d096a1"} err="failed to get container status \"e17a71719b6937a9723fc267c3e193d7cbdc1d17a8343348d792e58604d096a1\": rpc error: code = NotFound desc = could not find container \"e17a71719b6937a9723fc267c3e193d7cbdc1d17a8343348d792e58604d096a1\": container with ID starting with e17a71719b6937a9723fc267c3e193d7cbdc1d17a8343348d792e58604d096a1 not found: ID does not exist"
Nov 22 11:25:38 crc kubenswrapper[4938]: I1122 11:25:38.467074 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="390f208b-4787-424f-a2ba-536763ac847d" path="/var/lib/kubelet/pods/390f208b-4787-424f-a2ba-536763ac847d/volumes"
Nov 22 11:25:41 crc kubenswrapper[4938]: I1122 11:25:41.301309 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:25:41 crc kubenswrapper[4938]: I1122 11:25:41.301638 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 11:26:11 crc kubenswrapper[4938]: I1122 11:26:11.301400 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:26:11 crc kubenswrapper[4938]: I1122 11:26:11.302363 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 11:26:41 crc kubenswrapper[4938]: I1122 11:26:41.300640 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:26:41 crc kubenswrapper[4938]: I1122 11:26:41.301189 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 11:26:41 crc kubenswrapper[4938]: I1122 11:26:41.301240 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc"
Nov 22 11:26:41 crc kubenswrapper[4938]: I1122 11:26:41.301978 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 11:26:41 crc kubenswrapper[4938]: I1122 11:26:41.302033 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0" gracePeriod=600
Nov 22 11:26:41 crc kubenswrapper[4938]: E1122 11:26:41.438321 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:26:42 crc kubenswrapper[4938]: I1122 11:26:42.106481 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0" exitCode=0
Nov 22 11:26:42 crc kubenswrapper[4938]: I1122 11:26:42.106541 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"}
Nov 22 11:26:42 crc kubenswrapper[4938]: I1122 11:26:42.106582 4938 scope.go:117] "RemoveContainer" containerID="79507d44c52adf7251572c57bd849ee03cedcfbbd8cc05038de0f58965cd2953"
Nov 22 11:26:42 crc kubenswrapper[4938]: I1122 11:26:42.107306 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:26:42 crc kubenswrapper[4938]: E1122 11:26:42.107546 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:26:54 crc kubenswrapper[4938]: I1122 11:26:54.449271 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:26:54 crc kubenswrapper[4938]: E1122 11:26:54.450381 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:27:06 crc kubenswrapper[4938]: I1122 11:27:06.448236 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:27:06 crc kubenswrapper[4938]: E1122 11:27:06.449127 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:27:21 crc kubenswrapper[4938]: I1122 11:27:21.449224 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:27:21 crc kubenswrapper[4938]: E1122 11:27:21.450562 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:27:35 crc kubenswrapper[4938]: I1122 11:27:35.447249 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:27:35 crc kubenswrapper[4938]: E1122 11:27:35.448499 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.311651 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9lrt9"]
Nov 22 11:27:40 crc kubenswrapper[4938]: E1122 11:27:40.312678 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390f208b-4787-424f-a2ba-536763ac847d" containerName="extract-content"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.312695 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="390f208b-4787-424f-a2ba-536763ac847d" containerName="extract-content"
Nov 22 11:27:40 crc kubenswrapper[4938]: E1122 11:27:40.312728 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390f208b-4787-424f-a2ba-536763ac847d" containerName="extract-utilities"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.312738 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="390f208b-4787-424f-a2ba-536763ac847d" containerName="extract-utilities"
Nov 22 11:27:40 crc kubenswrapper[4938]: E1122 11:27:40.312762 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390f208b-4787-424f-a2ba-536763ac847d" containerName="registry-server"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.312770 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="390f208b-4787-424f-a2ba-536763ac847d" containerName="registry-server"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.313025 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="390f208b-4787-424f-a2ba-536763ac847d" containerName="registry-server"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.314816 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.322785 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9lrt9"]
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.469423 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3743da9c-1b50-4843-beb2-71222c86bd6e-catalog-content\") pod \"certified-operators-9lrt9\" (UID: \"3743da9c-1b50-4843-beb2-71222c86bd6e\") " pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.470036 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st6vs\" (UniqueName: \"kubernetes.io/projected/3743da9c-1b50-4843-beb2-71222c86bd6e-kube-api-access-st6vs\") pod \"certified-operators-9lrt9\" (UID: \"3743da9c-1b50-4843-beb2-71222c86bd6e\") " pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.470069 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3743da9c-1b50-4843-beb2-71222c86bd6e-utilities\") pod \"certified-operators-9lrt9\" (UID: \"3743da9c-1b50-4843-beb2-71222c86bd6e\") " pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.571697 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3743da9c-1b50-4843-beb2-71222c86bd6e-catalog-content\") pod \"certified-operators-9lrt9\" (UID: \"3743da9c-1b50-4843-beb2-71222c86bd6e\") " pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.571828 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-st6vs\" (UniqueName: \"kubernetes.io/projected/3743da9c-1b50-4843-beb2-71222c86bd6e-kube-api-access-st6vs\") pod \"certified-operators-9lrt9\" (UID: \"3743da9c-1b50-4843-beb2-71222c86bd6e\") " pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.571859 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3743da9c-1b50-4843-beb2-71222c86bd6e-utilities\") pod \"certified-operators-9lrt9\" (UID: \"3743da9c-1b50-4843-beb2-71222c86bd6e\") " pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.572254 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3743da9c-1b50-4843-beb2-71222c86bd6e-catalog-content\") pod \"certified-operators-9lrt9\" (UID: \"3743da9c-1b50-4843-beb2-71222c86bd6e\") " pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.572354 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3743da9c-1b50-4843-beb2-71222c86bd6e-utilities\") pod \"certified-operators-9lrt9\" (UID: \"3743da9c-1b50-4843-beb2-71222c86bd6e\") " pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.598360 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st6vs\" (UniqueName: \"kubernetes.io/projected/3743da9c-1b50-4843-beb2-71222c86bd6e-kube-api-access-st6vs\") pod \"certified-operators-9lrt9\" (UID: \"3743da9c-1b50-4843-beb2-71222c86bd6e\") " pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:40 crc kubenswrapper[4938]: I1122 11:27:40.655575 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:41 crc kubenswrapper[4938]: I1122 11:27:41.206596 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9lrt9"]
Nov 22 11:27:41 crc kubenswrapper[4938]: I1122 11:27:41.752093 4938 generic.go:334] "Generic (PLEG): container finished" podID="3743da9c-1b50-4843-beb2-71222c86bd6e" containerID="6325e7dcc027b9f822bcb3ff38f95863822d574f61d8c06349808c4cf1cf6ef1" exitCode=0
Nov 22 11:27:41 crc kubenswrapper[4938]: I1122 11:27:41.752156 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lrt9" event={"ID":"3743da9c-1b50-4843-beb2-71222c86bd6e","Type":"ContainerDied","Data":"6325e7dcc027b9f822bcb3ff38f95863822d574f61d8c06349808c4cf1cf6ef1"}
Nov 22 11:27:41 crc kubenswrapper[4938]: I1122 11:27:41.752541 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lrt9" event={"ID":"3743da9c-1b50-4843-beb2-71222c86bd6e","Type":"ContainerStarted","Data":"409dfef8060d59aff679a8b3d98edc65d41582964bb027bd28b766f6cc217061"}
Nov 22 11:27:42 crc kubenswrapper[4938]: I1122 11:27:42.762947 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lrt9" event={"ID":"3743da9c-1b50-4843-beb2-71222c86bd6e","Type":"ContainerStarted","Data":"e9187d8b9dadfdd7a772771300c64144a997fcb5b2d161fda9a404b171e86abe"}
Nov 22 11:27:43 crc kubenswrapper[4938]: I1122 11:27:43.783266 4938 generic.go:334] "Generic (PLEG): container finished" podID="3743da9c-1b50-4843-beb2-71222c86bd6e" containerID="e9187d8b9dadfdd7a772771300c64144a997fcb5b2d161fda9a404b171e86abe" exitCode=0
Nov 22 11:27:43 crc kubenswrapper[4938]: I1122 11:27:43.783475 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lrt9" event={"ID":"3743da9c-1b50-4843-beb2-71222c86bd6e","Type":"ContainerDied","Data":"e9187d8b9dadfdd7a772771300c64144a997fcb5b2d161fda9a404b171e86abe"}
Nov 22 11:27:44 crc kubenswrapper[4938]: I1122 11:27:44.794337 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lrt9" event={"ID":"3743da9c-1b50-4843-beb2-71222c86bd6e","Type":"ContainerStarted","Data":"a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af"}
Nov 22 11:27:47 crc kubenswrapper[4938]: I1122 11:27:47.447815 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:27:47 crc kubenswrapper[4938]: E1122 11:27:47.448399 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:27:50 crc kubenswrapper[4938]: I1122 11:27:50.656950 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:50 crc kubenswrapper[4938]: I1122 11:27:50.657530 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:50 crc kubenswrapper[4938]: I1122 11:27:50.724843 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:50 crc kubenswrapper[4938]: I1122 11:27:50.751962 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9lrt9" podStartSLOduration=8.262903903 podStartE2EDuration="10.751944133s" podCreationTimestamp="2025-11-22 11:27:40 +0000 UTC" firstStartedPulling="2025-11-22 11:27:41.755247414 +0000 UTC m=+2994.223084823" lastFinishedPulling="2025-11-22 11:27:44.244287654 +0000 UTC m=+2996.712125053" observedRunningTime="2025-11-22 11:27:44.824362028 +0000 UTC m=+2997.292199437" watchObservedRunningTime="2025-11-22 11:27:50.751944133 +0000 UTC m=+3003.219781532"
Nov 22 11:27:50 crc kubenswrapper[4938]: I1122 11:27:50.894503 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:50 crc kubenswrapper[4938]: I1122 11:27:50.961202 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9lrt9"]
Nov 22 11:27:52 crc kubenswrapper[4938]: I1122 11:27:52.863222 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9lrt9" podUID="3743da9c-1b50-4843-beb2-71222c86bd6e" containerName="registry-server" containerID="cri-o://a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af" gracePeriod=2
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.363184 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.466337 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3743da9c-1b50-4843-beb2-71222c86bd6e-utilities\") pod \"3743da9c-1b50-4843-beb2-71222c86bd6e\" (UID: \"3743da9c-1b50-4843-beb2-71222c86bd6e\") "
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.466453 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-st6vs\" (UniqueName: \"kubernetes.io/projected/3743da9c-1b50-4843-beb2-71222c86bd6e-kube-api-access-st6vs\") pod \"3743da9c-1b50-4843-beb2-71222c86bd6e\" (UID: \"3743da9c-1b50-4843-beb2-71222c86bd6e\") "
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.466577 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3743da9c-1b50-4843-beb2-71222c86bd6e-catalog-content\") pod \"3743da9c-1b50-4843-beb2-71222c86bd6e\" (UID: \"3743da9c-1b50-4843-beb2-71222c86bd6e\") "
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.467399 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3743da9c-1b50-4843-beb2-71222c86bd6e-utilities" (OuterVolumeSpecName: "utilities") pod "3743da9c-1b50-4843-beb2-71222c86bd6e" (UID: "3743da9c-1b50-4843-beb2-71222c86bd6e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.472118 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3743da9c-1b50-4843-beb2-71222c86bd6e-kube-api-access-st6vs" (OuterVolumeSpecName: "kube-api-access-st6vs") pod "3743da9c-1b50-4843-beb2-71222c86bd6e" (UID: "3743da9c-1b50-4843-beb2-71222c86bd6e"). InnerVolumeSpecName "kube-api-access-st6vs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.512640 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3743da9c-1b50-4843-beb2-71222c86bd6e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3743da9c-1b50-4843-beb2-71222c86bd6e" (UID: "3743da9c-1b50-4843-beb2-71222c86bd6e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.568858 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3743da9c-1b50-4843-beb2-71222c86bd6e-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.568897 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-st6vs\" (UniqueName: \"kubernetes.io/projected/3743da9c-1b50-4843-beb2-71222c86bd6e-kube-api-access-st6vs\") on node \"crc\" DevicePath \"\""
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.568980 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3743da9c-1b50-4843-beb2-71222c86bd6e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.873638 4938 generic.go:334] "Generic (PLEG): container finished" podID="3743da9c-1b50-4843-beb2-71222c86bd6e" containerID="a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af" exitCode=0
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.873701 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lrt9" event={"ID":"3743da9c-1b50-4843-beb2-71222c86bd6e","Type":"ContainerDied","Data":"a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af"}
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.873718 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9lrt9"
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.873747 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9lrt9" event={"ID":"3743da9c-1b50-4843-beb2-71222c86bd6e","Type":"ContainerDied","Data":"409dfef8060d59aff679a8b3d98edc65d41582964bb027bd28b766f6cc217061"}
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.873777 4938 scope.go:117] "RemoveContainer" containerID="a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af"
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.916733 4938 scope.go:117] "RemoveContainer" containerID="e9187d8b9dadfdd7a772771300c64144a997fcb5b2d161fda9a404b171e86abe"
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.922904 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9lrt9"]
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.943768 4938 scope.go:117] "RemoveContainer" containerID="6325e7dcc027b9f822bcb3ff38f95863822d574f61d8c06349808c4cf1cf6ef1"
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.949149 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9lrt9"]
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.979084 4938 scope.go:117] "RemoveContainer" containerID="a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af"
Nov 22 11:27:53 crc kubenswrapper[4938]: E1122 11:27:53.979472 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af\": container with ID starting with a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af not found: ID does not exist" containerID="a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af"
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.979511 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af"} err="failed to get container status \"a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af\": rpc error: code = NotFound desc = could not find container \"a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af\": container with ID starting with a28d0612ddf6913131bd3e62ad1c89465b0f3fdf3d8ea0caa1006b6f84d793af not found: ID does not exist"
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.979537 4938 scope.go:117] "RemoveContainer" containerID="e9187d8b9dadfdd7a772771300c64144a997fcb5b2d161fda9a404b171e86abe"
Nov 22 11:27:53 crc kubenswrapper[4938]: E1122 11:27:53.979779 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9187d8b9dadfdd7a772771300c64144a997fcb5b2d161fda9a404b171e86abe\": container with ID starting with e9187d8b9dadfdd7a772771300c64144a997fcb5b2d161fda9a404b171e86abe not found: ID does not exist" containerID="e9187d8b9dadfdd7a772771300c64144a997fcb5b2d161fda9a404b171e86abe"
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.979810 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9187d8b9dadfdd7a772771300c64144a997fcb5b2d161fda9a404b171e86abe"} err="failed to get container status \"e9187d8b9dadfdd7a772771300c64144a997fcb5b2d161fda9a404b171e86abe\": rpc error: code = NotFound desc = could not find container \"e9187d8b9dadfdd7a772771300c64144a997fcb5b2d161fda9a404b171e86abe\": container with ID starting with e9187d8b9dadfdd7a772771300c64144a997fcb5b2d161fda9a404b171e86abe not found: ID does not exist"
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.979828 4938 scope.go:117] "RemoveContainer" containerID="6325e7dcc027b9f822bcb3ff38f95863822d574f61d8c06349808c4cf1cf6ef1"
Nov 22 11:27:53 crc kubenswrapper[4938]: E1122 11:27:53.980130 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6325e7dcc027b9f822bcb3ff38f95863822d574f61d8c06349808c4cf1cf6ef1\": container with ID starting with 6325e7dcc027b9f822bcb3ff38f95863822d574f61d8c06349808c4cf1cf6ef1 not found: ID does not exist" containerID="6325e7dcc027b9f822bcb3ff38f95863822d574f61d8c06349808c4cf1cf6ef1"
Nov 22 11:27:53 crc kubenswrapper[4938]: I1122 11:27:53.980154 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6325e7dcc027b9f822bcb3ff38f95863822d574f61d8c06349808c4cf1cf6ef1"} err="failed to get container status \"6325e7dcc027b9f822bcb3ff38f95863822d574f61d8c06349808c4cf1cf6ef1\": rpc error: code = NotFound desc = could not find container \"6325e7dcc027b9f822bcb3ff38f95863822d574f61d8c06349808c4cf1cf6ef1\": container with ID starting with 6325e7dcc027b9f822bcb3ff38f95863822d574f61d8c06349808c4cf1cf6ef1 not found: ID does not exist"
Nov 22 11:27:54 crc kubenswrapper[4938]: I1122 11:27:54.457514 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3743da9c-1b50-4843-beb2-71222c86bd6e" path="/var/lib/kubelet/pods/3743da9c-1b50-4843-beb2-71222c86bd6e/volumes"
Nov 22 11:28:01 crc kubenswrapper[4938]: I1122 11:28:01.447797 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:28:01 crc kubenswrapper[4938]: E1122 11:28:01.448503 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:28:14 crc kubenswrapper[4938]: I1122 11:28:14.448049 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:28:14 crc kubenswrapper[4938]: E1122 11:28:14.448847 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:28:25 crc kubenswrapper[4938]: I1122 11:28:25.447437 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:28:25 crc kubenswrapper[4938]: E1122 11:28:25.448434 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:28:36 crc kubenswrapper[4938]: I1122 11:28:36.447902 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:28:36 crc kubenswrapper[4938]: E1122 11:28:36.448940 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:28:51 crc kubenswrapper[4938]: I1122 11:28:51.448280 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:28:51 crc kubenswrapper[4938]: E1122 11:28:51.449106 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:29:06 crc kubenswrapper[4938]: I1122 11:29:06.447648 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:29:06 crc kubenswrapper[4938]: E1122 11:29:06.448560 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:29:19 crc kubenswrapper[4938]: I1122 11:29:19.447415 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:29:19 crc kubenswrapper[4938]: E1122 11:29:19.448196 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:29:31 crc kubenswrapper[4938]: I1122 11:29:31.448223 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:29:31 crc kubenswrapper[4938]: E1122 11:29:31.448953 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:29:43 crc kubenswrapper[4938]: I1122 11:29:43.447676 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:29:43 crc kubenswrapper[4938]: E1122 11:29:43.448263 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:29:55 crc kubenswrapper[4938]: I1122 11:29:55.447804 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:29:55 crc kubenswrapper[4938]: E1122 11:29:55.449282 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.172213 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4"]
Nov 22 11:30:00 crc kubenswrapper[4938]: E1122 11:30:00.173321 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3743da9c-1b50-4843-beb2-71222c86bd6e" containerName="registry-server"
Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.173336 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="3743da9c-1b50-4843-beb2-71222c86bd6e" containerName="registry-server"
Nov 22 11:30:00 crc kubenswrapper[4938]: E1122
11:30:00.173372 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3743da9c-1b50-4843-beb2-71222c86bd6e" containerName="extract-content" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.173381 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="3743da9c-1b50-4843-beb2-71222c86bd6e" containerName="extract-content" Nov 22 11:30:00 crc kubenswrapper[4938]: E1122 11:30:00.173393 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3743da9c-1b50-4843-beb2-71222c86bd6e" containerName="extract-utilities" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.173400 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="3743da9c-1b50-4843-beb2-71222c86bd6e" containerName="extract-utilities" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.173587 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="3743da9c-1b50-4843-beb2-71222c86bd6e" containerName="registry-server" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.174283 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.176423 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.177176 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.182960 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4"] Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.287002 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/34eb755d-2c8f-4d55-adbe-928554ad84af-config-volume\") pod \"collect-profiles-29396850-mmzw4\" (UID: \"34eb755d-2c8f-4d55-adbe-928554ad84af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.287057 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/34eb755d-2c8f-4d55-adbe-928554ad84af-secret-volume\") pod \"collect-profiles-29396850-mmzw4\" (UID: \"34eb755d-2c8f-4d55-adbe-928554ad84af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.287199 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfb8k\" (UniqueName: \"kubernetes.io/projected/34eb755d-2c8f-4d55-adbe-928554ad84af-kube-api-access-mfb8k\") pod \"collect-profiles-29396850-mmzw4\" (UID: \"34eb755d-2c8f-4d55-adbe-928554ad84af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.388562 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/34eb755d-2c8f-4d55-adbe-928554ad84af-config-volume\") pod \"collect-profiles-29396850-mmzw4\" (UID: \"34eb755d-2c8f-4d55-adbe-928554ad84af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:00 crc kubenswrapper[4938]: 
I1122 11:30:00.388603 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/34eb755d-2c8f-4d55-adbe-928554ad84af-secret-volume\") pod \"collect-profiles-29396850-mmzw4\" (UID: \"34eb755d-2c8f-4d55-adbe-928554ad84af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.388708 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfb8k\" (UniqueName: \"kubernetes.io/projected/34eb755d-2c8f-4d55-adbe-928554ad84af-kube-api-access-mfb8k\") pod \"collect-profiles-29396850-mmzw4\" (UID: \"34eb755d-2c8f-4d55-adbe-928554ad84af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.389496 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/34eb755d-2c8f-4d55-adbe-928554ad84af-config-volume\") pod \"collect-profiles-29396850-mmzw4\" (UID: \"34eb755d-2c8f-4d55-adbe-928554ad84af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.395890 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/34eb755d-2c8f-4d55-adbe-928554ad84af-secret-volume\") pod \"collect-profiles-29396850-mmzw4\" (UID: \"34eb755d-2c8f-4d55-adbe-928554ad84af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.417118 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfb8k\" (UniqueName: \"kubernetes.io/projected/34eb755d-2c8f-4d55-adbe-928554ad84af-kube-api-access-mfb8k\") pod \"collect-profiles-29396850-mmzw4\" (UID: \"34eb755d-2c8f-4d55-adbe-928554ad84af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.520550 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:00 crc kubenswrapper[4938]: I1122 11:30:00.940401 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4"] Nov 22 11:30:00 crc kubenswrapper[4938]: W1122 11:30:00.950195 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34eb755d_2c8f_4d55_adbe_928554ad84af.slice/crio-75dadd5e1b96d5426abeb7335a401537a6111b2d65c79fcc193a819850f35bba WatchSource:0}: Error finding container 75dadd5e1b96d5426abeb7335a401537a6111b2d65c79fcc193a819850f35bba: Status 404 returned error can't find the container with id 75dadd5e1b96d5426abeb7335a401537a6111b2d65c79fcc193a819850f35bba Nov 22 11:30:01 crc kubenswrapper[4938]: I1122 11:30:01.079255 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" event={"ID":"34eb755d-2c8f-4d55-adbe-928554ad84af","Type":"ContainerStarted","Data":"75dadd5e1b96d5426abeb7335a401537a6111b2d65c79fcc193a819850f35bba"} Nov 22 11:30:02 crc kubenswrapper[4938]: I1122 11:30:02.094415 4938 generic.go:334] "Generic (PLEG): container finished" podID="34eb755d-2c8f-4d55-adbe-928554ad84af" containerID="aaaef5f9ed9101bfe1857a0487be377775e5665db3c67a275248aa2a5ba1298d" exitCode=0 Nov 22 11:30:02 crc kubenswrapper[4938]: I1122 11:30:02.094512 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" event={"ID":"34eb755d-2c8f-4d55-adbe-928554ad84af","Type":"ContainerDied","Data":"aaaef5f9ed9101bfe1857a0487be377775e5665db3c67a275248aa2a5ba1298d"} Nov 22 11:30:03 crc kubenswrapper[4938]: I1122 11:30:03.511189 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:03 crc kubenswrapper[4938]: I1122 11:30:03.654489 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/34eb755d-2c8f-4d55-adbe-928554ad84af-secret-volume\") pod \"34eb755d-2c8f-4d55-adbe-928554ad84af\" (UID: \"34eb755d-2c8f-4d55-adbe-928554ad84af\") " Nov 22 11:30:03 crc kubenswrapper[4938]: I1122 11:30:03.654623 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfb8k\" (UniqueName: \"kubernetes.io/projected/34eb755d-2c8f-4d55-adbe-928554ad84af-kube-api-access-mfb8k\") pod \"34eb755d-2c8f-4d55-adbe-928554ad84af\" (UID: \"34eb755d-2c8f-4d55-adbe-928554ad84af\") " Nov 22 11:30:03 crc kubenswrapper[4938]: I1122 11:30:03.654739 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/34eb755d-2c8f-4d55-adbe-928554ad84af-config-volume\") pod \"34eb755d-2c8f-4d55-adbe-928554ad84af\" (UID: \"34eb755d-2c8f-4d55-adbe-928554ad84af\") " Nov 22 11:30:03 crc kubenswrapper[4938]: I1122 11:30:03.655233 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34eb755d-2c8f-4d55-adbe-928554ad84af-config-volume" (OuterVolumeSpecName: "config-volume") pod "34eb755d-2c8f-4d55-adbe-928554ad84af" (UID: "34eb755d-2c8f-4d55-adbe-928554ad84af"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:30:03 crc kubenswrapper[4938]: I1122 11:30:03.655664 4938 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/34eb755d-2c8f-4d55-adbe-928554ad84af-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:30:03 crc kubenswrapper[4938]: I1122 11:30:03.660000 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34eb755d-2c8f-4d55-adbe-928554ad84af-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "34eb755d-2c8f-4d55-adbe-928554ad84af" (UID: "34eb755d-2c8f-4d55-adbe-928554ad84af"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:30:03 crc kubenswrapper[4938]: I1122 11:30:03.660176 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34eb755d-2c8f-4d55-adbe-928554ad84af-kube-api-access-mfb8k" (OuterVolumeSpecName: "kube-api-access-mfb8k") pod "34eb755d-2c8f-4d55-adbe-928554ad84af" (UID: "34eb755d-2c8f-4d55-adbe-928554ad84af"). InnerVolumeSpecName "kube-api-access-mfb8k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:30:03 crc kubenswrapper[4938]: I1122 11:30:03.757994 4938 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/34eb755d-2c8f-4d55-adbe-928554ad84af-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:30:03 crc kubenswrapper[4938]: I1122 11:30:03.758048 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfb8k\" (UniqueName: \"kubernetes.io/projected/34eb755d-2c8f-4d55-adbe-928554ad84af-kube-api-access-mfb8k\") on node \"crc\" DevicePath \"\"" Nov 22 11:30:04 crc kubenswrapper[4938]: I1122 11:30:04.114030 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" event={"ID":"34eb755d-2c8f-4d55-adbe-928554ad84af","Type":"ContainerDied","Data":"75dadd5e1b96d5426abeb7335a401537a6111b2d65c79fcc193a819850f35bba"} Nov 22 11:30:04 crc kubenswrapper[4938]: I1122 11:30:04.114078 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75dadd5e1b96d5426abeb7335a401537a6111b2d65c79fcc193a819850f35bba" Nov 22 11:30:04 crc kubenswrapper[4938]: I1122 11:30:04.114093 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-mmzw4" Nov 22 11:30:04 crc kubenswrapper[4938]: I1122 11:30:04.585673 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58"] Nov 22 11:30:04 crc kubenswrapper[4938]: I1122 11:30:04.592707 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396805-phk58"] Nov 22 11:30:06 crc kubenswrapper[4938]: I1122 11:30:06.464400 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebae4252-359f-4d1a-9461-8991ec435c0f" path="/var/lib/kubelet/pods/ebae4252-359f-4d1a-9461-8991ec435c0f/volumes" Nov 22 11:30:07 crc kubenswrapper[4938]: I1122 11:30:07.447465 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0" Nov 22 11:30:07 crc kubenswrapper[4938]: E1122 11:30:07.447953 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:30:18 crc kubenswrapper[4938]: I1122 11:30:18.461132 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0" Nov 22 11:30:18 crc kubenswrapper[4938]: E1122 11:30:18.462191 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:30:26 crc kubenswrapper[4938]: I1122 11:30:26.547136 4938 scope.go:117] "RemoveContainer" containerID="dae273297a76fdfbd54636b312efb2a0a3daa844151d2f2106f0c2a38c177ae7" Nov 22 11:30:33 crc kubenswrapper[4938]: I1122 11:30:33.447674 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0" Nov 22 11:30:33 crc kubenswrapper[4938]: E1122 11:30:33.448465 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:30:48 crc kubenswrapper[4938]: I1122 11:30:48.451185 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0" Nov 22 11:30:48 crc kubenswrapper[4938]: E1122 11:30:48.451977 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:31:02 crc kubenswrapper[4938]: I1122 11:31:02.448211 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0" Nov 22 11:31:02 crc kubenswrapper[4938]: E1122 11:31:02.449069 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:31:14 crc kubenswrapper[4938]: I1122 11:31:14.447479 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0" Nov 22 11:31:14 crc kubenswrapper[4938]: E1122 11:31:14.448436 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:31:26 crc kubenswrapper[4938]: I1122 11:31:26.447993 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0" Nov 22 11:31:26 crc kubenswrapper[4938]: E1122 11:31:26.448741 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:31:40 crc kubenswrapper[4938]: I1122 11:31:40.447328 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0" Nov 22 11:31:40 crc kubenswrapper[4938]: E1122 11:31:40.448468 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:31:51 crc kubenswrapper[4938]: I1122 11:31:51.447548 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0" Nov 22 11:31:52 crc kubenswrapper[4938]: I1122 11:31:52.125221 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"64fb1ce22b45d5e7269baebd6eedc2cc244304808c797e19cbc721df85fd1dad"} Nov 22 11:34:11 crc kubenswrapper[4938]: I1122 11:34:11.300996 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:34:11 crc kubenswrapper[4938]: I1122 11:34:11.301579 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.124693 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-t22qf"] Nov 22 11:34:31 crc kubenswrapper[4938]: E1122 11:34:31.125801 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34eb755d-2c8f-4d55-adbe-928554ad84af" containerName="collect-profiles" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.125816 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="34eb755d-2c8f-4d55-adbe-928554ad84af" containerName="collect-profiles" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.126022 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="34eb755d-2c8f-4d55-adbe-928554ad84af" containerName="collect-profiles" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.127263 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.144843 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t22qf"] Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.322737 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dddae05f-3261-4f99-a663-74dcf4c0e9bf-catalog-content\") pod \"redhat-operators-t22qf\" (UID: \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\") " pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.323226 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mp5h\" (UniqueName: \"kubernetes.io/projected/dddae05f-3261-4f99-a663-74dcf4c0e9bf-kube-api-access-8mp5h\") pod \"redhat-operators-t22qf\" (UID: \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\") " pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.323296 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dddae05f-3261-4f99-a663-74dcf4c0e9bf-utilities\") pod \"redhat-operators-t22qf\" (UID: \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\") " pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.425478 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dddae05f-3261-4f99-a663-74dcf4c0e9bf-catalog-content\") pod \"redhat-operators-t22qf\" (UID: \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\") " pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.425695 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mp5h\" (UniqueName: \"kubernetes.io/projected/dddae05f-3261-4f99-a663-74dcf4c0e9bf-kube-api-access-8mp5h\") pod 
\"redhat-operators-t22qf\" (UID: \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\") " pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.425759 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dddae05f-3261-4f99-a663-74dcf4c0e9bf-utilities\") pod \"redhat-operators-t22qf\" (UID: \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\") " pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.426046 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dddae05f-3261-4f99-a663-74dcf4c0e9bf-catalog-content\") pod \"redhat-operators-t22qf\" (UID: \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\") " pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.426201 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dddae05f-3261-4f99-a663-74dcf4c0e9bf-utilities\") pod \"redhat-operators-t22qf\" (UID: \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\") " pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.444811 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mp5h\" (UniqueName: \"kubernetes.io/projected/dddae05f-3261-4f99-a663-74dcf4c0e9bf-kube-api-access-8mp5h\") pod \"redhat-operators-t22qf\" (UID: \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\") " pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.458971 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.538675 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6wbwf"] Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.541050 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.549611 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6wbwf"] Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.736935 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lp8m5\" (UniqueName: \"kubernetes.io/projected/4e7ebc00-d0c9-4470-a037-cdf3248938f2-kube-api-access-lp8m5\") pod \"community-operators-6wbwf\" (UID: \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\") " pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.737309 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e7ebc00-d0c9-4470-a037-cdf3248938f2-catalog-content\") pod \"community-operators-6wbwf\" (UID: \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\") " pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.737329 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e7ebc00-d0c9-4470-a037-cdf3248938f2-utilities\") pod \"community-operators-6wbwf\" (UID: \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\") " pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.838636 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lp8m5\" (UniqueName: \"kubernetes.io/projected/4e7ebc00-d0c9-4470-a037-cdf3248938f2-kube-api-access-lp8m5\") pod \"community-operators-6wbwf\" (UID: \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\") " pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.838695 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e7ebc00-d0c9-4470-a037-cdf3248938f2-utilities\") pod \"community-operators-6wbwf\" (UID: \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\") " pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.838719 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e7ebc00-d0c9-4470-a037-cdf3248938f2-catalog-content\") pod \"community-operators-6wbwf\" (UID: \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\") " pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.839177 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e7ebc00-d0c9-4470-a037-cdf3248938f2-utilities\") pod \"community-operators-6wbwf\" (UID: \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\") " pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.839242 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e7ebc00-d0c9-4470-a037-cdf3248938f2-catalog-content\") pod \"community-operators-6wbwf\" (UID: \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\") " pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.859834 4938 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lp8m5\" (UniqueName: \"kubernetes.io/projected/4e7ebc00-d0c9-4470-a037-cdf3248938f2-kube-api-access-lp8m5\") pod \"community-operators-6wbwf\" (UID: \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\") " pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:31 crc kubenswrapper[4938]: I1122 11:34:31.875465 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:32 crc kubenswrapper[4938]: I1122 11:34:32.006540 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t22qf"] Nov 22 11:34:32 crc kubenswrapper[4938]: I1122 11:34:32.341612 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6wbwf"] Nov 22 11:34:32 crc kubenswrapper[4938]: I1122 11:34:32.601138 4938 generic.go:334] "Generic (PLEG): container finished" podID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" containerID="fccb61cf2aaaaed7a915e49523b38424f5bd56ed55206295ba230e33416c2e81" exitCode=0 Nov 22 11:34:32 crc kubenswrapper[4938]: I1122 11:34:32.601224 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22qf" event={"ID":"dddae05f-3261-4f99-a663-74dcf4c0e9bf","Type":"ContainerDied","Data":"fccb61cf2aaaaed7a915e49523b38424f5bd56ed55206295ba230e33416c2e81"} Nov 22 11:34:32 crc kubenswrapper[4938]: I1122 11:34:32.601315 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22qf" event={"ID":"dddae05f-3261-4f99-a663-74dcf4c0e9bf","Type":"ContainerStarted","Data":"ea072b809a275849d6898a791aa67f4722c392d281631dbea9978ce6a9174585"} Nov 22 11:34:32 crc kubenswrapper[4938]: I1122 11:34:32.609626 4938 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:34:32 crc kubenswrapper[4938]: I1122 11:34:32.610491 4938 generic.go:334] "Generic (PLEG): container finished" podID="4e7ebc00-d0c9-4470-a037-cdf3248938f2" containerID="a1b30850d540275f6331a881217766b7364f0e78dfef3cebd64d5cd1785bec85" exitCode=0 Nov 22 11:34:32 crc kubenswrapper[4938]: I1122 11:34:32.610526 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wbwf" event={"ID":"4e7ebc00-d0c9-4470-a037-cdf3248938f2","Type":"ContainerDied","Data":"a1b30850d540275f6331a881217766b7364f0e78dfef3cebd64d5cd1785bec85"} Nov 22 11:34:32 crc kubenswrapper[4938]: I1122 11:34:32.610658 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wbwf" event={"ID":"4e7ebc00-d0c9-4470-a037-cdf3248938f2","Type":"ContainerStarted","Data":"f543c9be14a785acfbdc0684588c212145043a9ae24ee9c81c9b55fd614ec2a7"} Nov 22 11:34:33 crc kubenswrapper[4938]: I1122 11:34:33.622238 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22qf" event={"ID":"dddae05f-3261-4f99-a663-74dcf4c0e9bf","Type":"ContainerStarted","Data":"d5a4b3d864831e4edd8204b576b0951c9cb351698532b318142ac8e742fe776b"} Nov 22 11:34:33 crc kubenswrapper[4938]: I1122 11:34:33.625320 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wbwf" event={"ID":"4e7ebc00-d0c9-4470-a037-cdf3248938f2","Type":"ContainerStarted","Data":"59a748d635c5c15892cb89cfb2b79ec6cb55e510f36961aa2b9dcb7fa5ae536f"} Nov 22 11:34:34 crc kubenswrapper[4938]: I1122 11:34:34.636622 4938 generic.go:334] "Generic (PLEG): 
container finished" podID="4e7ebc00-d0c9-4470-a037-cdf3248938f2" containerID="59a748d635c5c15892cb89cfb2b79ec6cb55e510f36961aa2b9dcb7fa5ae536f" exitCode=0 Nov 22 11:34:34 crc kubenswrapper[4938]: I1122 11:34:34.636665 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wbwf" event={"ID":"4e7ebc00-d0c9-4470-a037-cdf3248938f2","Type":"ContainerDied","Data":"59a748d635c5c15892cb89cfb2b79ec6cb55e510f36961aa2b9dcb7fa5ae536f"} Nov 22 11:34:35 crc kubenswrapper[4938]: I1122 11:34:35.651150 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wbwf" event={"ID":"4e7ebc00-d0c9-4470-a037-cdf3248938f2","Type":"ContainerStarted","Data":"4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce"} Nov 22 11:34:35 crc kubenswrapper[4938]: I1122 11:34:35.652683 4938 generic.go:334] "Generic (PLEG): container finished" podID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" containerID="d5a4b3d864831e4edd8204b576b0951c9cb351698532b318142ac8e742fe776b" exitCode=0 Nov 22 11:34:35 crc kubenswrapper[4938]: I1122 11:34:35.652741 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22qf" event={"ID":"dddae05f-3261-4f99-a663-74dcf4c0e9bf","Type":"ContainerDied","Data":"d5a4b3d864831e4edd8204b576b0951c9cb351698532b318142ac8e742fe776b"} Nov 22 11:34:35 crc kubenswrapper[4938]: I1122 11:34:35.679470 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6wbwf" podStartSLOduration=2.233134875 podStartE2EDuration="4.679440345s" podCreationTimestamp="2025-11-22 11:34:31 +0000 UTC" firstStartedPulling="2025-11-22 11:34:32.615107939 +0000 UTC m=+3405.082945338" lastFinishedPulling="2025-11-22 11:34:35.061413399 +0000 UTC m=+3407.529250808" observedRunningTime="2025-11-22 11:34:35.67601849 +0000 UTC m=+3408.143855889" watchObservedRunningTime="2025-11-22 11:34:35.679440345 +0000 UTC m=+3408.147277754" Nov 22 11:34:37 crc kubenswrapper[4938]: I1122 11:34:37.671064 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22qf" event={"ID":"dddae05f-3261-4f99-a663-74dcf4c0e9bf","Type":"ContainerStarted","Data":"1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5"} Nov 22 11:34:37 crc kubenswrapper[4938]: I1122 11:34:37.689200 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-t22qf" podStartSLOduration=2.5642333539999997 podStartE2EDuration="6.68917979s" podCreationTimestamp="2025-11-22 11:34:31 +0000 UTC" firstStartedPulling="2025-11-22 11:34:32.609437348 +0000 UTC m=+3405.077274747" lastFinishedPulling="2025-11-22 11:34:36.734383774 +0000 UTC m=+3409.202221183" observedRunningTime="2025-11-22 11:34:37.687120529 +0000 UTC m=+3410.154957948" watchObservedRunningTime="2025-11-22 11:34:37.68917979 +0000 UTC m=+3410.157017199" Nov 22 11:34:41 crc kubenswrapper[4938]: I1122 11:34:41.300805 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:34:41 crc kubenswrapper[4938]: I1122 11:34:41.301575 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:34:41 crc kubenswrapper[4938]: I1122 11:34:41.459843 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:41 crc kubenswrapper[4938]: I1122 11:34:41.459894 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-t22qf" Nov 22 11:34:41 crc kubenswrapper[4938]: I1122 11:34:41.875978 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:41 crc kubenswrapper[4938]: I1122 11:34:41.876035 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:41 crc kubenswrapper[4938]: I1122 11:34:41.917792 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:42 crc kubenswrapper[4938]: I1122 11:34:42.519891 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t22qf" podUID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" containerName="registry-server" probeResult="failure" output=< Nov 22 11:34:42 crc kubenswrapper[4938]: timeout: failed to connect service ":50051" within 1s Nov 22 11:34:42 crc kubenswrapper[4938]: > Nov 22 11:34:42 crc kubenswrapper[4938]: I1122 11:34:42.780642 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:42 crc kubenswrapper[4938]: I1122 11:34:42.839079 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6wbwf"] Nov 22 11:34:44 crc kubenswrapper[4938]: I1122 11:34:44.734540 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6wbwf" podUID="4e7ebc00-d0c9-4470-a037-cdf3248938f2" containerName="registry-server" containerID="cri-o://4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce" gracePeriod=2 Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.242331 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.416956 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e7ebc00-d0c9-4470-a037-cdf3248938f2-utilities\") pod \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\" (UID: \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\") " Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.417554 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lp8m5\" (UniqueName: \"kubernetes.io/projected/4e7ebc00-d0c9-4470-a037-cdf3248938f2-kube-api-access-lp8m5\") pod \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\" (UID: \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\") " Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.417672 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e7ebc00-d0c9-4470-a037-cdf3248938f2-catalog-content\") pod \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\" (UID: \"4e7ebc00-d0c9-4470-a037-cdf3248938f2\") " Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.418336 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e7ebc00-d0c9-4470-a037-cdf3248938f2-utilities" (OuterVolumeSpecName: "utilities") pod "4e7ebc00-d0c9-4470-a037-cdf3248938f2" (UID: "4e7ebc00-d0c9-4470-a037-cdf3248938f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.435263 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e7ebc00-d0c9-4470-a037-cdf3248938f2-kube-api-access-lp8m5" (OuterVolumeSpecName: "kube-api-access-lp8m5") pod "4e7ebc00-d0c9-4470-a037-cdf3248938f2" (UID: "4e7ebc00-d0c9-4470-a037-cdf3248938f2"). InnerVolumeSpecName "kube-api-access-lp8m5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.474757 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e7ebc00-d0c9-4470-a037-cdf3248938f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e7ebc00-d0c9-4470-a037-cdf3248938f2" (UID: "4e7ebc00-d0c9-4470-a037-cdf3248938f2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.520230 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e7ebc00-d0c9-4470-a037-cdf3248938f2-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.520266 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lp8m5\" (UniqueName: \"kubernetes.io/projected/4e7ebc00-d0c9-4470-a037-cdf3248938f2-kube-api-access-lp8m5\") on node \"crc\" DevicePath \"\"" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.520275 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e7ebc00-d0c9-4470-a037-cdf3248938f2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.744412 4938 generic.go:334] "Generic (PLEG): container finished" podID="4e7ebc00-d0c9-4470-a037-cdf3248938f2" containerID="4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce" exitCode=0 Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.744461 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wbwf" event={"ID":"4e7ebc00-d0c9-4470-a037-cdf3248938f2","Type":"ContainerDied","Data":"4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce"} Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.744495 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wbwf" event={"ID":"4e7ebc00-d0c9-4470-a037-cdf3248938f2","Type":"ContainerDied","Data":"f543c9be14a785acfbdc0684588c212145043a9ae24ee9c81c9b55fd614ec2a7"} Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.744492 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6wbwf" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.744567 4938 scope.go:117] "RemoveContainer" containerID="4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.779197 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6wbwf"] Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.786782 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6wbwf"] Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.791234 4938 scope.go:117] "RemoveContainer" containerID="59a748d635c5c15892cb89cfb2b79ec6cb55e510f36961aa2b9dcb7fa5ae536f" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.815499 4938 scope.go:117] "RemoveContainer" containerID="a1b30850d540275f6331a881217766b7364f0e78dfef3cebd64d5cd1785bec85" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.860443 4938 scope.go:117] "RemoveContainer" containerID="4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce" Nov 22 11:34:45 crc kubenswrapper[4938]: E1122 11:34:45.861240 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce\": container with ID starting with 4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce not found: ID does not exist" containerID="4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.861299 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce"} err="failed to get container status \"4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce\": rpc error: code = NotFound desc = could not find container \"4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce\": container with ID starting with 4b3ed2a78439272c4b79b5c807ff79a816264eb9bd815019cb90c3cd93437cce not found: ID does not exist" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.861339 4938 scope.go:117] "RemoveContainer" containerID="59a748d635c5c15892cb89cfb2b79ec6cb55e510f36961aa2b9dcb7fa5ae536f" Nov 22 11:34:45 crc kubenswrapper[4938]: E1122 11:34:45.861954 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59a748d635c5c15892cb89cfb2b79ec6cb55e510f36961aa2b9dcb7fa5ae536f\": container with ID starting with 59a748d635c5c15892cb89cfb2b79ec6cb55e510f36961aa2b9dcb7fa5ae536f not found: ID does not exist" containerID="59a748d635c5c15892cb89cfb2b79ec6cb55e510f36961aa2b9dcb7fa5ae536f" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.861996 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59a748d635c5c15892cb89cfb2b79ec6cb55e510f36961aa2b9dcb7fa5ae536f"} err="failed to get container status \"59a748d635c5c15892cb89cfb2b79ec6cb55e510f36961aa2b9dcb7fa5ae536f\": rpc error: code = NotFound desc = could not find container \"59a748d635c5c15892cb89cfb2b79ec6cb55e510f36961aa2b9dcb7fa5ae536f\": container with ID starting with 59a748d635c5c15892cb89cfb2b79ec6cb55e510f36961aa2b9dcb7fa5ae536f not found: ID does not exist" Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.862024 4938 scope.go:117] "RemoveContainer" 
containerID="a1b30850d540275f6331a881217766b7364f0e78dfef3cebd64d5cd1785bec85"
Nov 22 11:34:45 crc kubenswrapper[4938]: E1122 11:34:45.862434 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1b30850d540275f6331a881217766b7364f0e78dfef3cebd64d5cd1785bec85\": container with ID starting with a1b30850d540275f6331a881217766b7364f0e78dfef3cebd64d5cd1785bec85 not found: ID does not exist" containerID="a1b30850d540275f6331a881217766b7364f0e78dfef3cebd64d5cd1785bec85"
Nov 22 11:34:45 crc kubenswrapper[4938]: I1122 11:34:45.862464 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1b30850d540275f6331a881217766b7364f0e78dfef3cebd64d5cd1785bec85"} err="failed to get container status \"a1b30850d540275f6331a881217766b7364f0e78dfef3cebd64d5cd1785bec85\": rpc error: code = NotFound desc = could not find container \"a1b30850d540275f6331a881217766b7364f0e78dfef3cebd64d5cd1785bec85\": container with ID starting with a1b30850d540275f6331a881217766b7364f0e78dfef3cebd64d5cd1785bec85 not found: ID does not exist"
Nov 22 11:34:46 crc kubenswrapper[4938]: I1122 11:34:46.457005 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e7ebc00-d0c9-4470-a037-cdf3248938f2" path="/var/lib/kubelet/pods/4e7ebc00-d0c9-4470-a037-cdf3248938f2/volumes"
Nov 22 11:34:51 crc kubenswrapper[4938]: I1122 11:34:51.505868 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-t22qf"
Nov 22 11:34:51 crc kubenswrapper[4938]: I1122 11:34:51.550500 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-t22qf"
Nov 22 11:34:51 crc kubenswrapper[4938]: I1122 11:34:51.743236 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t22qf"]
Nov 22 11:34:52 crc kubenswrapper[4938]: I1122 11:34:52.809971 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-t22qf" podUID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" containerName="registry-server" containerID="cri-o://1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5" gracePeriod=2
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.301316 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t22qf"
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.464348 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dddae05f-3261-4f99-a663-74dcf4c0e9bf-utilities\") pod \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\" (UID: \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\") "
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.464502 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mp5h\" (UniqueName: \"kubernetes.io/projected/dddae05f-3261-4f99-a663-74dcf4c0e9bf-kube-api-access-8mp5h\") pod \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\" (UID: \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\") "
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.464546 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dddae05f-3261-4f99-a663-74dcf4c0e9bf-catalog-content\") pod \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\" (UID: \"dddae05f-3261-4f99-a663-74dcf4c0e9bf\") "
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.466258 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dddae05f-3261-4f99-a663-74dcf4c0e9bf-utilities" (OuterVolumeSpecName: "utilities") pod "dddae05f-3261-4f99-a663-74dcf4c0e9bf" (UID: "dddae05f-3261-4f99-a663-74dcf4c0e9bf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.469965 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dddae05f-3261-4f99-a663-74dcf4c0e9bf-kube-api-access-8mp5h" (OuterVolumeSpecName: "kube-api-access-8mp5h") pod "dddae05f-3261-4f99-a663-74dcf4c0e9bf" (UID: "dddae05f-3261-4f99-a663-74dcf4c0e9bf"). InnerVolumeSpecName "kube-api-access-8mp5h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.558894 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dddae05f-3261-4f99-a663-74dcf4c0e9bf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dddae05f-3261-4f99-a663-74dcf4c0e9bf" (UID: "dddae05f-3261-4f99-a663-74dcf4c0e9bf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.567457 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mp5h\" (UniqueName: \"kubernetes.io/projected/dddae05f-3261-4f99-a663-74dcf4c0e9bf-kube-api-access-8mp5h\") on node \"crc\" DevicePath \"\""
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.567495 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dddae05f-3261-4f99-a663-74dcf4c0e9bf-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.567507 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dddae05f-3261-4f99-a663-74dcf4c0e9bf-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.820460 4938 generic.go:334] "Generic (PLEG): container finished" podID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" containerID="1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5" exitCode=0
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.820498 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22qf" event={"ID":"dddae05f-3261-4f99-a663-74dcf4c0e9bf","Type":"ContainerDied","Data":"1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5"}
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.820523 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t22qf"
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.820532 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t22qf" event={"ID":"dddae05f-3261-4f99-a663-74dcf4c0e9bf","Type":"ContainerDied","Data":"ea072b809a275849d6898a791aa67f4722c392d281631dbea9978ce6a9174585"}
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.820549 4938 scope.go:117] "RemoveContainer" containerID="1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5"
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.846435 4938 scope.go:117] "RemoveContainer" containerID="d5a4b3d864831e4edd8204b576b0951c9cb351698532b318142ac8e742fe776b"
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.858008 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t22qf"]
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.865665 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-t22qf"]
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.874654 4938 scope.go:117] "RemoveContainer" containerID="fccb61cf2aaaaed7a915e49523b38424f5bd56ed55206295ba230e33416c2e81"
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.926173 4938 scope.go:117] "RemoveContainer" containerID="1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5"
Nov 22 11:34:53 crc kubenswrapper[4938]: E1122 11:34:53.926965 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5\": container with ID starting with 1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5 not found: ID does not exist" containerID="1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5"
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.927067 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5"} err="failed to get container status \"1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5\": rpc error: code = NotFound desc = could not find container \"1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5\": container with ID starting with 1809bef2fe52d3605bf5ef2584be8e23a623783859245d8712454eb84d9e1ec5 not found: ID does not exist"
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.927100 4938 scope.go:117] "RemoveContainer" containerID="d5a4b3d864831e4edd8204b576b0951c9cb351698532b318142ac8e742fe776b"
Nov 22 11:34:53 crc kubenswrapper[4938]: E1122 11:34:53.927524 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5a4b3d864831e4edd8204b576b0951c9cb351698532b318142ac8e742fe776b\": container with ID starting with d5a4b3d864831e4edd8204b576b0951c9cb351698532b318142ac8e742fe776b not found: ID does not exist" containerID="d5a4b3d864831e4edd8204b576b0951c9cb351698532b318142ac8e742fe776b"
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.927570 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5a4b3d864831e4edd8204b576b0951c9cb351698532b318142ac8e742fe776b"} err="failed to get container status \"d5a4b3d864831e4edd8204b576b0951c9cb351698532b318142ac8e742fe776b\": rpc error: code = NotFound desc = could not find container \"d5a4b3d864831e4edd8204b576b0951c9cb351698532b318142ac8e742fe776b\": container with ID starting with d5a4b3d864831e4edd8204b576b0951c9cb351698532b318142ac8e742fe776b not found: ID does not exist"
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.927598 4938 scope.go:117] "RemoveContainer" containerID="fccb61cf2aaaaed7a915e49523b38424f5bd56ed55206295ba230e33416c2e81"
Nov 22 11:34:53 crc kubenswrapper[4938]: E1122 11:34:53.927995 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fccb61cf2aaaaed7a915e49523b38424f5bd56ed55206295ba230e33416c2e81\": container with ID starting with fccb61cf2aaaaed7a915e49523b38424f5bd56ed55206295ba230e33416c2e81 not found: ID does not exist" containerID="fccb61cf2aaaaed7a915e49523b38424f5bd56ed55206295ba230e33416c2e81"
Nov 22 11:34:53 crc kubenswrapper[4938]: I1122 11:34:53.928028 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fccb61cf2aaaaed7a915e49523b38424f5bd56ed55206295ba230e33416c2e81"} err="failed to get container status \"fccb61cf2aaaaed7a915e49523b38424f5bd56ed55206295ba230e33416c2e81\": rpc error: code = NotFound desc = could not find container \"fccb61cf2aaaaed7a915e49523b38424f5bd56ed55206295ba230e33416c2e81\": container with ID starting with fccb61cf2aaaaed7a915e49523b38424f5bd56ed55206295ba230e33416c2e81 not found: ID does not exist"
Nov 22 11:34:54 crc kubenswrapper[4938]: I1122 11:34:54.469543 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" path="/var/lib/kubelet/pods/dddae05f-3261-4f99-a663-74dcf4c0e9bf/volumes"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.053870 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bt9wp"]
Nov 22 11:34:58 crc kubenswrapper[4938]: E1122 11:34:58.055025 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e7ebc00-d0c9-4470-a037-cdf3248938f2" containerName="registry-server"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.055039 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e7ebc00-d0c9-4470-a037-cdf3248938f2" containerName="registry-server"
Nov 22 11:34:58 crc kubenswrapper[4938]: E1122 11:34:58.055057 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e7ebc00-d0c9-4470-a037-cdf3248938f2" containerName="extract-content"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.055065 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e7ebc00-d0c9-4470-a037-cdf3248938f2" containerName="extract-content"
Nov 22 11:34:58 crc kubenswrapper[4938]: E1122 11:34:58.055081 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" containerName="extract-content"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.055087 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" containerName="extract-content"
Nov 22 11:34:58 crc kubenswrapper[4938]: E1122 11:34:58.055100 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" containerName="extract-utilities"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.055107 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" containerName="extract-utilities"
Nov 22 11:34:58 crc kubenswrapper[4938]: E1122 11:34:58.055135 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e7ebc00-d0c9-4470-a037-cdf3248938f2" containerName="extract-utilities"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.055141 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e7ebc00-d0c9-4470-a037-cdf3248938f2" containerName="extract-utilities"
Nov 22 11:34:58 crc kubenswrapper[4938]: E1122 11:34:58.055159 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" containerName="registry-server"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.055164 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" containerName="registry-server"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.055345 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="dddae05f-3261-4f99-a663-74dcf4c0e9bf" containerName="registry-server"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.055376 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e7ebc00-d0c9-4470-a037-cdf3248938f2" containerName="registry-server"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.056862 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.065494 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bt9wp"]
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.159065 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-utilities\") pod \"redhat-marketplace-bt9wp\" (UID: \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\") " pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.159128 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-catalog-content\") pod \"redhat-marketplace-bt9wp\" (UID: \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\") " pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.159160 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7q7q5\" (UniqueName: \"kubernetes.io/projected/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-kube-api-access-7q7q5\") pod \"redhat-marketplace-bt9wp\" (UID: \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\") " pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.263519 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-utilities\") pod \"redhat-marketplace-bt9wp\" (UID: \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\") " pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.263577 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-catalog-content\") pod \"redhat-marketplace-bt9wp\" (UID: \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\") " pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.263606 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7q7q5\" (UniqueName: \"kubernetes.io/projected/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-kube-api-access-7q7q5\") pod \"redhat-marketplace-bt9wp\" (UID: \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\") " pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.264170 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-utilities\") pod \"redhat-marketplace-bt9wp\" (UID: \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\") " pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.264173 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-catalog-content\") pod \"redhat-marketplace-bt9wp\" (UID: \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\") " pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.285057 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7q7q5\" (UniqueName: \"kubernetes.io/projected/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-kube-api-access-7q7q5\") pod \"redhat-marketplace-bt9wp\" (UID: \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\") " pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.386406 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.710163 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bt9wp"]
Nov 22 11:34:58 crc kubenswrapper[4938]: I1122 11:34:58.876185 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bt9wp" event={"ID":"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca","Type":"ContainerStarted","Data":"26d30588e94636e8ce62201cbf76d6169420c202746c74bb9747c5292cd587f0"}
Nov 22 11:34:59 crc kubenswrapper[4938]: E1122 11:34:59.152450 4938 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod21ca4f08_eaa1_4b2b_a500_c46f6aef47ca.slice/crio-12eea2bd1c1f05bc9297cca26870b84d5bb7682bfc512f54c440998e5eeadab1.scope\": RecentStats: unable to find data in memory cache]"
Nov 22 11:34:59 crc kubenswrapper[4938]: I1122 11:34:59.885499 4938 generic.go:334] "Generic (PLEG): container finished" podID="21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" containerID="12eea2bd1c1f05bc9297cca26870b84d5bb7682bfc512f54c440998e5eeadab1" exitCode=0
Nov 22 11:34:59 crc kubenswrapper[4938]: I1122 11:34:59.885538 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bt9wp" event={"ID":"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca","Type":"ContainerDied","Data":"12eea2bd1c1f05bc9297cca26870b84d5bb7682bfc512f54c440998e5eeadab1"}
Nov 22 11:35:01 crc kubenswrapper[4938]: I1122 11:35:01.902484 4938 generic.go:334] "Generic (PLEG): container finished" podID="21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" containerID="6526bb5e9e2d1fbb1f0e62a15f3a8c6e7e89ce830f7890d752b65a5df70d3c86" exitCode=0
Nov 22 11:35:01 crc kubenswrapper[4938]: I1122 11:35:01.902566 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bt9wp" event={"ID":"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca","Type":"ContainerDied","Data":"6526bb5e9e2d1fbb1f0e62a15f3a8c6e7e89ce830f7890d752b65a5df70d3c86"}
Nov 22 11:35:02 crc kubenswrapper[4938]: I1122 11:35:02.920162 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bt9wp" event={"ID":"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca","Type":"ContainerStarted","Data":"69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a"}
Nov 22 11:35:02 crc kubenswrapper[4938]: I1122 11:35:02.944005 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bt9wp" podStartSLOduration=2.524381761 podStartE2EDuration="4.94397999s" podCreationTimestamp="2025-11-22 11:34:58 +0000 UTC" firstStartedPulling="2025-11-22 11:34:59.887308413 +0000 UTC m=+3432.355145812" lastFinishedPulling="2025-11-22 11:35:02.306906622 +0000 UTC m=+3434.774744041" observedRunningTime="2025-11-22 11:35:02.939206071 +0000 UTC m=+3435.407043470" watchObservedRunningTime="2025-11-22 11:35:02.94397999 +0000 UTC m=+3435.411817399"
Nov 22 11:35:08 crc kubenswrapper[4938]: I1122 11:35:08.387382 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:35:08 crc kubenswrapper[4938]: I1122 11:35:08.388153 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:35:08 crc kubenswrapper[4938]: I1122 11:35:08.462531 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:35:09 crc kubenswrapper[4938]: I1122 11:35:09.025513 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:35:09 crc kubenswrapper[4938]: I1122 11:35:09.065645 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bt9wp"]
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.000998 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bt9wp" podUID="21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" containerName="registry-server" containerID="cri-o://69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a" gracePeriod=2
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.301132 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.301192 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.301242 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc"
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.302050 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"64fb1ce22b45d5e7269baebd6eedc2cc244304808c797e19cbc721df85fd1dad"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.302111 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://64fb1ce22b45d5e7269baebd6eedc2cc244304808c797e19cbc721df85fd1dad" gracePeriod=600
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.587536 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.732677 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-utilities\") pod \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\" (UID: \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\") "
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.732742 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7q7q5\" (UniqueName: \"kubernetes.io/projected/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-kube-api-access-7q7q5\") pod \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\" (UID: \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\") "
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.732795 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-catalog-content\") pod \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\" (UID: \"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca\") "
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.733557 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-utilities" (OuterVolumeSpecName: "utilities") pod "21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" (UID: "21ca4f08-eaa1-4b2b-a500-c46f6aef47ca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.744846 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-kube-api-access-7q7q5" (OuterVolumeSpecName: "kube-api-access-7q7q5") pod "21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" (UID: "21ca4f08-eaa1-4b2b-a500-c46f6aef47ca"). InnerVolumeSpecName "kube-api-access-7q7q5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.752854 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" (UID: "21ca4f08-eaa1-4b2b-a500-c46f6aef47ca"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.835019 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.835051 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7q7q5\" (UniqueName: \"kubernetes.io/projected/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-kube-api-access-7q7q5\") on node \"crc\" DevicePath \"\""
Nov 22 11:35:11 crc kubenswrapper[4938]: I1122 11:35:11.835061 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.013449 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="64fb1ce22b45d5e7269baebd6eedc2cc244304808c797e19cbc721df85fd1dad" exitCode=0
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.013532 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"64fb1ce22b45d5e7269baebd6eedc2cc244304808c797e19cbc721df85fd1dad"}
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.013582 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"}
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.013605 4938 scope.go:117] "RemoveContainer" containerID="3a2f7f55e4a12d5f40e7860f464eb069638187e53cdef1f0e0457edca88ed4a0"
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.017437 4938 generic.go:334] "Generic (PLEG): container finished" podID="21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" containerID="69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a" exitCode=0
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.017468 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bt9wp"
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.017481 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bt9wp" event={"ID":"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca","Type":"ContainerDied","Data":"69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a"}
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.017702 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bt9wp" event={"ID":"21ca4f08-eaa1-4b2b-a500-c46f6aef47ca","Type":"ContainerDied","Data":"26d30588e94636e8ce62201cbf76d6169420c202746c74bb9747c5292cd587f0"}
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.055013 4938 scope.go:117] "RemoveContainer" containerID="69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a"
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.078333 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bt9wp"]
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.082025 4938 scope.go:117] "RemoveContainer" containerID="6526bb5e9e2d1fbb1f0e62a15f3a8c6e7e89ce830f7890d752b65a5df70d3c86"
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.086652 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bt9wp"]
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.108645 4938 scope.go:117] "RemoveContainer" containerID="12eea2bd1c1f05bc9297cca26870b84d5bb7682bfc512f54c440998e5eeadab1"
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.171828 4938 scope.go:117] "RemoveContainer" containerID="69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a"
Nov 22 11:35:12 crc kubenswrapper[4938]: E1122 11:35:12.172443 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a\": container with ID starting with 69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a not found: ID does not exist" containerID="69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a"
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.172499 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a"} err="failed to get container status \"69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a\": rpc error: code = NotFound desc = could not find container \"69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a\": container with ID starting with 69647184c4e2aed9cd084e2265fd06386db61c41255f7525a6dae21fdd534c8a not found: ID does not exist"
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.172536 4938 scope.go:117] "RemoveContainer" containerID="6526bb5e9e2d1fbb1f0e62a15f3a8c6e7e89ce830f7890d752b65a5df70d3c86"
Nov 22 11:35:12 crc kubenswrapper[4938]: E1122 11:35:12.173042 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6526bb5e9e2d1fbb1f0e62a15f3a8c6e7e89ce830f7890d752b65a5df70d3c86\": container with ID starting with 6526bb5e9e2d1fbb1f0e62a15f3a8c6e7e89ce830f7890d752b65a5df70d3c86 not found: ID does not exist" containerID="6526bb5e9e2d1fbb1f0e62a15f3a8c6e7e89ce830f7890d752b65a5df70d3c86"
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.173103 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6526bb5e9e2d1fbb1f0e62a15f3a8c6e7e89ce830f7890d752b65a5df70d3c86"} err="failed to get container status \"6526bb5e9e2d1fbb1f0e62a15f3a8c6e7e89ce830f7890d752b65a5df70d3c86\": rpc error: code = NotFound desc = could not find container \"6526bb5e9e2d1fbb1f0e62a15f3a8c6e7e89ce830f7890d752b65a5df70d3c86\": container with ID starting with 6526bb5e9e2d1fbb1f0e62a15f3a8c6e7e89ce830f7890d752b65a5df70d3c86 not found: ID does not exist"
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.173140 4938 scope.go:117] "RemoveContainer" containerID="12eea2bd1c1f05bc9297cca26870b84d5bb7682bfc512f54c440998e5eeadab1"
Nov 22 11:35:12 crc kubenswrapper[4938]: E1122 11:35:12.173508 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12eea2bd1c1f05bc9297cca26870b84d5bb7682bfc512f54c440998e5eeadab1\": container with ID starting with 12eea2bd1c1f05bc9297cca26870b84d5bb7682bfc512f54c440998e5eeadab1 not found: ID does not exist" containerID="12eea2bd1c1f05bc9297cca26870b84d5bb7682bfc512f54c440998e5eeadab1"
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.173542 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12eea2bd1c1f05bc9297cca26870b84d5bb7682bfc512f54c440998e5eeadab1"} err="failed to get container status \"12eea2bd1c1f05bc9297cca26870b84d5bb7682bfc512f54c440998e5eeadab1\": rpc error: code = NotFound desc = could not find container \"12eea2bd1c1f05bc9297cca26870b84d5bb7682bfc512f54c440998e5eeadab1\": container with ID starting with 12eea2bd1c1f05bc9297cca26870b84d5bb7682bfc512f54c440998e5eeadab1 not found: ID does not exist"
Nov 22 11:35:12 crc kubenswrapper[4938]: I1122 11:35:12.459828 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" path="/var/lib/kubelet/pods/21ca4f08-eaa1-4b2b-a500-c46f6aef47ca/volumes"
Nov 22 11:36:30 crc kubenswrapper[4938]: I1122 11:36:30.795714 4938 generic.go:334] "Generic (PLEG): container finished" podID="7624b768-90d5-4bad-b97e-21ea6549679a" containerID="7e5553a7a9d309ab8a47ea838f9575be2432066c85fe9157f462061037c75f8f" exitCode=0
Nov 22 11:36:30 crc kubenswrapper[4938]: I1122 11:36:30.795852 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"7624b768-90d5-4bad-b97e-21ea6549679a","Type":"ContainerDied","Data":"7e5553a7a9d309ab8a47ea838f9575be2432066c85fe9157f462061037c75f8f"}
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.151159 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.270495 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7624b768-90d5-4bad-b97e-21ea6549679a-test-operator-ephemeral-workdir\") pod \"7624b768-90d5-4bad-b97e-21ea6549679a\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") "
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.270636 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-ca-certs\") pod \"7624b768-90d5-4bad-b97e-21ea6549679a\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") "
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.270676 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-openstack-config-secret\") pod \"7624b768-90d5-4bad-b97e-21ea6549679a\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") "
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.270717 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-ssh-key\") pod \"7624b768-90d5-4bad-b97e-21ea6549679a\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") "
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.270766 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7624b768-90d5-4bad-b97e-21ea6549679a-openstack-config\") pod \"7624b768-90d5-4bad-b97e-21ea6549679a\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") "
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.270839 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"7624b768-90d5-4bad-b97e-21ea6549679a\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") "
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.270904 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7624b768-90d5-4bad-b97e-21ea6549679a-config-data\") pod \"7624b768-90d5-4bad-b97e-21ea6549679a\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") "
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.271003 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjm67\" (UniqueName: \"kubernetes.io/projected/7624b768-90d5-4bad-b97e-21ea6549679a-kube-api-access-sjm67\") pod \"7624b768-90d5-4bad-b97e-21ea6549679a\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") "
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.271607 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7624b768-90d5-4bad-b97e-21ea6549679a-config-data" (OuterVolumeSpecName: "config-data") pod "7624b768-90d5-4bad-b97e-21ea6549679a" (UID: "7624b768-90d5-4bad-b97e-21ea6549679a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.271721 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7624b768-90d5-4bad-b97e-21ea6549679a-test-operator-ephemeral-temporary\") pod \"7624b768-90d5-4bad-b97e-21ea6549679a\" (UID: \"7624b768-90d5-4bad-b97e-21ea6549679a\") "
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.272479 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7624b768-90d5-4bad-b97e-21ea6549679a-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "7624b768-90d5-4bad-b97e-21ea6549679a" (UID: "7624b768-90d5-4bad-b97e-21ea6549679a"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.273087 4938 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7624b768-90d5-4bad-b97e-21ea6549679a-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.273122 4938 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7624b768-90d5-4bad-b97e-21ea6549679a-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.273950 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7624b768-90d5-4bad-b97e-21ea6549679a-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "7624b768-90d5-4bad-b97e-21ea6549679a" (UID: "7624b768-90d5-4bad-b97e-21ea6549679a"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.276121 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7624b768-90d5-4bad-b97e-21ea6549679a-kube-api-access-sjm67" (OuterVolumeSpecName: "kube-api-access-sjm67") pod "7624b768-90d5-4bad-b97e-21ea6549679a" (UID: "7624b768-90d5-4bad-b97e-21ea6549679a"). InnerVolumeSpecName "kube-api-access-sjm67". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.290506 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "test-operator-logs") pod "7624b768-90d5-4bad-b97e-21ea6549679a" (UID: "7624b768-90d5-4bad-b97e-21ea6549679a"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.299715 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "7624b768-90d5-4bad-b97e-21ea6549679a" (UID: "7624b768-90d5-4bad-b97e-21ea6549679a"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.307682 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7624b768-90d5-4bad-b97e-21ea6549679a" (UID: "7624b768-90d5-4bad-b97e-21ea6549679a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.315677 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "7624b768-90d5-4bad-b97e-21ea6549679a" (UID: "7624b768-90d5-4bad-b97e-21ea6549679a"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.334483 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7624b768-90d5-4bad-b97e-21ea6549679a-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "7624b768-90d5-4bad-b97e-21ea6549679a" (UID: "7624b768-90d5-4bad-b97e-21ea6549679a"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.375992 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjm67\" (UniqueName: \"kubernetes.io/projected/7624b768-90d5-4bad-b97e-21ea6549679a-kube-api-access-sjm67\") on node \"crc\" DevicePath \"\""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.376046 4938 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7624b768-90d5-4bad-b97e-21ea6549679a-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.376066 4938 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-ca-certs\") on node \"crc\" DevicePath \"\""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.376087 4938 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.376107 4938 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7624b768-90d5-4bad-b97e-21ea6549679a-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.376128 4938 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7624b768-90d5-4bad-b97e-21ea6549679a-openstack-config\") on node \"crc\" DevicePath \"\""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.376174 4938 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" "
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.421925 4938 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc"
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.477620 4938 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\""
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.817230 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"7624b768-90d5-4bad-b97e-21ea6549679a","Type":"ContainerDied","Data":"e9060d9d5dc34ae9aced52b8f8ffd3ba58b4566540a6a6666fe94fb9681a24ea"}
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.817276 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9060d9d5dc34ae9aced52b8f8ffd3ba58b4566540a6a6666fe94fb9681a24ea"
Nov 22 11:36:32 crc kubenswrapper[4938]: I1122 11:36:32.817276 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 22 11:36:40 crc kubenswrapper[4938]: I1122 11:36:40.873718 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 22 11:36:40 crc kubenswrapper[4938]: E1122 11:36:40.874728 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7624b768-90d5-4bad-b97e-21ea6549679a" containerName="tempest-tests-tempest-tests-runner"
Nov 22 11:36:40 crc kubenswrapper[4938]: I1122 11:36:40.874745 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="7624b768-90d5-4bad-b97e-21ea6549679a" containerName="tempest-tests-tempest-tests-runner"
Nov 22 11:36:40 crc kubenswrapper[4938]: E1122 11:36:40.874766 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" containerName="registry-server"
Nov 22 11:36:40 crc kubenswrapper[4938]: I1122 11:36:40.874772 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" containerName="registry-server"
Nov 22 11:36:40 crc kubenswrapper[4938]: E1122 11:36:40.874800 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" containerName="extract-utilities"
Nov 22 11:36:40 crc kubenswrapper[4938]: I1122 11:36:40.874805 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" containerName="extract-utilities"
Nov 22 11:36:40 crc kubenswrapper[4938]: E1122 11:36:40.874814 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" containerName="extract-content"
Nov 22 11:36:40 crc kubenswrapper[4938]: I1122 11:36:40.874820 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" containerName="extract-content"
Nov 22 11:36:40 crc kubenswrapper[4938]: I1122 11:36:40.874990 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="7624b768-90d5-4bad-b97e-21ea6549679a" containerName="tempest-tests-tempest-tests-runner"
Nov 22 11:36:40 crc kubenswrapper[4938]: I1122 11:36:40.875022 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="21ca4f08-eaa1-4b2b-a500-c46f6aef47ca" containerName="registry-server"
Nov 22 11:36:40 crc kubenswrapper[4938]: I1122 11:36:40.875632 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 22 11:36:40 crc kubenswrapper[4938]: I1122 11:36:40.879273 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-ldj5t"
Nov 22 11:36:40 crc kubenswrapper[4938]: I1122 11:36:40.886672 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 22 11:36:40 crc kubenswrapper[4938]: I1122 11:36:40.992489 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"aeda96c8-4659-41c2-947d-f10a1c61bee0\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 22 11:36:40 crc kubenswrapper[4938]: I1122 11:36:40.992668 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82cmz\" (UniqueName: \"kubernetes.io/projected/aeda96c8-4659-41c2-947d-f10a1c61bee0-kube-api-access-82cmz\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"aeda96c8-4659-41c2-947d-f10a1c61bee0\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 22 11:36:41 crc kubenswrapper[4938]: I1122 11:36:41.093942 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82cmz\" (UniqueName: \"kubernetes.io/projected/aeda96c8-4659-41c2-947d-f10a1c61bee0-kube-api-access-82cmz\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"aeda96c8-4659-41c2-947d-f10a1c61bee0\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 22 11:36:41 crc kubenswrapper[4938]: I1122 11:36:41.094072 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"aeda96c8-4659-41c2-947d-f10a1c61bee0\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 22 11:36:41 crc kubenswrapper[4938]: I1122 11:36:41.094579 4938 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"aeda96c8-4659-41c2-947d-f10a1c61bee0\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 22 11:36:41 crc kubenswrapper[4938]: I1122 11:36:41.118942 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82cmz\" (UniqueName: \"kubernetes.io/projected/aeda96c8-4659-41c2-947d-f10a1c61bee0-kube-api-access-82cmz\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"aeda96c8-4659-41c2-947d-f10a1c61bee0\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 22 11:36:41 crc kubenswrapper[4938]: I1122 11:36:41.120735 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"aeda96c8-4659-41c2-947d-f10a1c61bee0\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 22 11:36:41 crc kubenswrapper[4938]: I1122 11:36:41.195110 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 22 11:36:41 crc kubenswrapper[4938]: I1122 11:36:41.694496 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 22 11:36:42 crc kubenswrapper[4938]: I1122 11:36:42.004386 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"aeda96c8-4659-41c2-947d-f10a1c61bee0","Type":"ContainerStarted","Data":"2966e2fb94f0717babef7edfbcf2d8d0918d2f14f31df7ca1d8ab18feddc5347"}
Nov 22 11:36:43 crc kubenswrapper[4938]: I1122 11:36:43.013204 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"aeda96c8-4659-41c2-947d-f10a1c61bee0","Type":"ContainerStarted","Data":"4f5766c2a9ac06f7bdb5d460a5b140017c151ed40b6fd1b4bcce7a65ab2c3880"}
Nov 22 11:36:43 crc kubenswrapper[4938]: I1122 11:36:43.034699 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.199772916 podStartE2EDuration="3.034660839s" podCreationTimestamp="2025-11-22 11:36:40 +0000 UTC" firstStartedPulling="2025-11-22 11:36:41.704507968 +0000 UTC m=+3534.172345367" lastFinishedPulling="2025-11-22 11:36:42.539395891 +0000 UTC m=+3535.007233290" observedRunningTime="2025-11-22 11:36:43.024227851 +0000 UTC m=+3535.492065270" watchObservedRunningTime="2025-11-22 11:36:43.034660839 +0000 UTC m=+3535.502498288"
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.164736 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pvr4m/must-gather-tn9hb"]
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.167605 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/must-gather-tn9hb"
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.170106 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-pvr4m"/"kube-root-ca.crt"
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.170349 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-pvr4m"/"openshift-service-ca.crt"
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.196719 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-pvr4m/must-gather-tn9hb"]
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.304225 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cf39f269-fac4-4d09-9d9b-39608937d005-must-gather-output\") pod \"must-gather-tn9hb\" (UID: \"cf39f269-fac4-4d09-9d9b-39608937d005\") " pod="openshift-must-gather-pvr4m/must-gather-tn9hb"
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.304354 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl5t4\" (UniqueName: \"kubernetes.io/projected/cf39f269-fac4-4d09-9d9b-39608937d005-kube-api-access-gl5t4\") pod \"must-gather-tn9hb\" (UID: \"cf39f269-fac4-4d09-9d9b-39608937d005\") " pod="openshift-must-gather-pvr4m/must-gather-tn9hb"
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.405754 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl5t4\" (UniqueName: \"kubernetes.io/projected/cf39f269-fac4-4d09-9d9b-39608937d005-kube-api-access-gl5t4\") pod \"must-gather-tn9hb\" (UID: \"cf39f269-fac4-4d09-9d9b-39608937d005\") " pod="openshift-must-gather-pvr4m/must-gather-tn9hb"
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.406085 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cf39f269-fac4-4d09-9d9b-39608937d005-must-gather-output\") pod \"must-gather-tn9hb\" (UID: \"cf39f269-fac4-4d09-9d9b-39608937d005\") " pod="openshift-must-gather-pvr4m/must-gather-tn9hb"
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.407179 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cf39f269-fac4-4d09-9d9b-39608937d005-must-gather-output\") pod \"must-gather-tn9hb\" (UID: \"cf39f269-fac4-4d09-9d9b-39608937d005\") " pod="openshift-must-gather-pvr4m/must-gather-tn9hb"
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.442319 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl5t4\" (UniqueName: \"kubernetes.io/projected/cf39f269-fac4-4d09-9d9b-39608937d005-kube-api-access-gl5t4\") pod \"must-gather-tn9hb\" (UID: \"cf39f269-fac4-4d09-9d9b-39608937d005\") " pod="openshift-must-gather-pvr4m/must-gather-tn9hb"
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.493442 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/must-gather-tn9hb"
Nov 22 11:37:05 crc kubenswrapper[4938]: I1122 11:37:05.820098 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-pvr4m/must-gather-tn9hb"]
Nov 22 11:37:06 crc kubenswrapper[4938]: I1122 11:37:06.251467 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pvr4m/must-gather-tn9hb" event={"ID":"cf39f269-fac4-4d09-9d9b-39608937d005","Type":"ContainerStarted","Data":"92cc9cc473def698f5e81c27d61a1da6ceb4b334e057abfb1f80cd59d4fcf455"}
Nov 22 11:37:11 crc kubenswrapper[4938]: I1122 11:37:11.300977 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:37:11 crc kubenswrapper[4938]: I1122 11:37:11.301797 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 11:37:12 crc kubenswrapper[4938]: I1122 11:37:12.311291 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pvr4m/must-gather-tn9hb" event={"ID":"cf39f269-fac4-4d09-9d9b-39608937d005","Type":"ContainerStarted","Data":"5da696528e847817c2958bcc2650cabdd598f71a530bc1d451f933025868a053"}
Nov 22 11:37:12 crc kubenswrapper[4938]: I1122 11:37:12.311627 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pvr4m/must-gather-tn9hb" event={"ID":"cf39f269-fac4-4d09-9d9b-39608937d005","Type":"ContainerStarted","Data":"f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252"}
Nov 22 11:37:12 crc kubenswrapper[4938]: I1122 11:37:12.334029 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pvr4m/must-gather-tn9hb" podStartSLOduration=1.803685691 podStartE2EDuration="7.333987263s" podCreationTimestamp="2025-11-22 11:37:05 +0000 UTC" firstStartedPulling="2025-11-22 11:37:05.825089307 +0000 UTC m=+3558.292926706" lastFinishedPulling="2025-11-22 11:37:11.355390879 +0000 UTC m=+3563.823228278" observedRunningTime="2025-11-22 11:37:12.323497723 +0000 UTC m=+3564.791335122" watchObservedRunningTime="2025-11-22 11:37:12.333987263 +0000 UTC m=+3564.801824662"
Nov 22 11:37:14 crc kubenswrapper[4938]: I1122 11:37:14.875196 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pvr4m/crc-debug-xds9t"]
Nov 22 11:37:14 crc kubenswrapper[4938]: I1122 11:37:14.876904 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/crc-debug-xds9t"
Nov 22 11:37:14 crc kubenswrapper[4938]: I1122 11:37:14.879056 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-pvr4m"/"default-dockercfg-gvln8"
Nov 22 11:37:15 crc kubenswrapper[4938]: I1122 11:37:15.006690 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3afd8565-c6ca-44fe-b758-696cc92eab90-host\") pod \"crc-debug-xds9t\" (UID: \"3afd8565-c6ca-44fe-b758-696cc92eab90\") " pod="openshift-must-gather-pvr4m/crc-debug-xds9t"
Nov 22 11:37:15 crc kubenswrapper[4938]: I1122 11:37:15.006825 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzz78\" (UniqueName: \"kubernetes.io/projected/3afd8565-c6ca-44fe-b758-696cc92eab90-kube-api-access-qzz78\") pod \"crc-debug-xds9t\" (UID: \"3afd8565-c6ca-44fe-b758-696cc92eab90\") " pod="openshift-must-gather-pvr4m/crc-debug-xds9t"
Nov 22 11:37:15 crc kubenswrapper[4938]: I1122 11:37:15.108476 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3afd8565-c6ca-44fe-b758-696cc92eab90-host\") pod \"crc-debug-xds9t\" (UID: \"3afd8565-c6ca-44fe-b758-696cc92eab90\") " pod="openshift-must-gather-pvr4m/crc-debug-xds9t"
Nov 22 11:37:15 crc kubenswrapper[4938]: I1122 11:37:15.108597 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzz78\" (UniqueName: \"kubernetes.io/projected/3afd8565-c6ca-44fe-b758-696cc92eab90-kube-api-access-qzz78\") pod \"crc-debug-xds9t\" (UID: \"3afd8565-c6ca-44fe-b758-696cc92eab90\") " pod="openshift-must-gather-pvr4m/crc-debug-xds9t"
Nov 22 11:37:15 crc kubenswrapper[4938]: I1122 11:37:15.109016 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3afd8565-c6ca-44fe-b758-696cc92eab90-host\") pod \"crc-debug-xds9t\" (UID: \"3afd8565-c6ca-44fe-b758-696cc92eab90\") " pod="openshift-must-gather-pvr4m/crc-debug-xds9t"
Nov 22 11:37:15 crc kubenswrapper[4938]: I1122 11:37:15.136066 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzz78\" (UniqueName: \"kubernetes.io/projected/3afd8565-c6ca-44fe-b758-696cc92eab90-kube-api-access-qzz78\") pod \"crc-debug-xds9t\" (UID: \"3afd8565-c6ca-44fe-b758-696cc92eab90\") " pod="openshift-must-gather-pvr4m/crc-debug-xds9t"
Nov 22 11:37:15 crc kubenswrapper[4938]: I1122 11:37:15.195989 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/crc-debug-xds9t"
Nov 22 11:37:15 crc kubenswrapper[4938]: I1122 11:37:15.336514 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pvr4m/crc-debug-xds9t" event={"ID":"3afd8565-c6ca-44fe-b758-696cc92eab90","Type":"ContainerStarted","Data":"edb8a9adb9a5a90bc1dbb653f0ce36ffeb29f528baf0d62969075840ba80e0dc"}
Nov 22 11:37:27 crc kubenswrapper[4938]: I1122 11:37:27.438351 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pvr4m/crc-debug-xds9t" event={"ID":"3afd8565-c6ca-44fe-b758-696cc92eab90","Type":"ContainerStarted","Data":"3a1c84d050534943802ec556d3cfc19cc3eb4e4ffa441468c2e579d9f15223b9"}
Nov 22 11:37:27 crc kubenswrapper[4938]: I1122 11:37:27.459265 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pvr4m/crc-debug-xds9t" podStartSLOduration=2.334796183 podStartE2EDuration="13.459241752s" podCreationTimestamp="2025-11-22 11:37:14 +0000 UTC" firstStartedPulling="2025-11-22 11:37:15.243416464 +0000 UTC m=+3567.711253863" lastFinishedPulling="2025-11-22 11:37:26.367862033 +0000 UTC m=+3578.835699432" observedRunningTime="2025-11-22 11:37:27.453067951 +0000 UTC m=+3579.920905360" watchObservedRunningTime="2025-11-22 11:37:27.459241752 +0000 UTC m=+3579.927079151"
Nov 22 11:37:41 crc kubenswrapper[4938]: I1122 11:37:41.301023 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:37:41 crc kubenswrapper[4938]: I1122 11:37:41.301614 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 11:38:07 crc kubenswrapper[4938]: I1122 11:38:07.806498 4938 generic.go:334] "Generic (PLEG): container finished" podID="3afd8565-c6ca-44fe-b758-696cc92eab90" containerID="3a1c84d050534943802ec556d3cfc19cc3eb4e4ffa441468c2e579d9f15223b9" exitCode=0
Nov 22 11:38:07 crc kubenswrapper[4938]: I1122 11:38:07.806605 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pvr4m/crc-debug-xds9t" event={"ID":"3afd8565-c6ca-44fe-b758-696cc92eab90","Type":"ContainerDied","Data":"3a1c84d050534943802ec556d3cfc19cc3eb4e4ffa441468c2e579d9f15223b9"}
Nov 22 11:38:08 crc kubenswrapper[4938]: I1122 11:38:08.941437 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/crc-debug-xds9t"
Nov 22 11:38:08 crc kubenswrapper[4938]: I1122 11:38:08.973053 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pvr4m/crc-debug-xds9t"]
Nov 22 11:38:08 crc kubenswrapper[4938]: I1122 11:38:08.984988 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pvr4m/crc-debug-xds9t"]
Nov 22 11:38:09 crc kubenswrapper[4938]: I1122 11:38:09.065289 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3afd8565-c6ca-44fe-b758-696cc92eab90-host\") pod \"3afd8565-c6ca-44fe-b758-696cc92eab90\" (UID: \"3afd8565-c6ca-44fe-b758-696cc92eab90\") "
Nov 22 11:38:09 crc kubenswrapper[4938]: I1122 11:38:09.065385 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzz78\" (UniqueName: \"kubernetes.io/projected/3afd8565-c6ca-44fe-b758-696cc92eab90-kube-api-access-qzz78\") pod \"3afd8565-c6ca-44fe-b758-696cc92eab90\" (UID: \"3afd8565-c6ca-44fe-b758-696cc92eab90\") "
Nov 22 11:38:09 crc kubenswrapper[4938]: I1122 11:38:09.065903 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3afd8565-c6ca-44fe-b758-696cc92eab90-host" (OuterVolumeSpecName: "host") pod "3afd8565-c6ca-44fe-b758-696cc92eab90" (UID: "3afd8565-c6ca-44fe-b758-696cc92eab90"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 22 11:38:09 crc kubenswrapper[4938]: I1122 11:38:09.066454 4938 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3afd8565-c6ca-44fe-b758-696cc92eab90-host\") on node \"crc\" DevicePath \"\""
Nov 22 11:38:09 crc kubenswrapper[4938]: I1122 11:38:09.075255 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3afd8565-c6ca-44fe-b758-696cc92eab90-kube-api-access-qzz78" (OuterVolumeSpecName: "kube-api-access-qzz78") pod "3afd8565-c6ca-44fe-b758-696cc92eab90" (UID: "3afd8565-c6ca-44fe-b758-696cc92eab90"). InnerVolumeSpecName "kube-api-access-qzz78". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:38:09 crc kubenswrapper[4938]: I1122 11:38:09.168664 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzz78\" (UniqueName: \"kubernetes.io/projected/3afd8565-c6ca-44fe-b758-696cc92eab90-kube-api-access-qzz78\") on node \"crc\" DevicePath \"\""
Nov 22 11:38:09 crc kubenswrapper[4938]: I1122 11:38:09.828661 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="edb8a9adb9a5a90bc1dbb653f0ce36ffeb29f528baf0d62969075840ba80e0dc"
Nov 22 11:38:09 crc kubenswrapper[4938]: I1122 11:38:09.828752 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pvr4m/crc-debug-xds9t" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.127771 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pvr4m/crc-debug-d7zqx"] Nov 22 11:38:10 crc kubenswrapper[4938]: E1122 11:38:10.128242 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3afd8565-c6ca-44fe-b758-696cc92eab90" containerName="container-00" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.128256 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="3afd8565-c6ca-44fe-b758-696cc92eab90" containerName="container-00" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.128421 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="3afd8565-c6ca-44fe-b758-696cc92eab90" containerName="container-00" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.129033 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/crc-debug-d7zqx" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.131292 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-pvr4m"/"default-dockercfg-gvln8" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.288807 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b791ecd2-3990-47d9-a423-a48cc1d07716-host\") pod \"crc-debug-d7zqx\" (UID: \"b791ecd2-3990-47d9-a423-a48cc1d07716\") " pod="openshift-must-gather-pvr4m/crc-debug-d7zqx" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.289052 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vskd6\" (UniqueName: \"kubernetes.io/projected/b791ecd2-3990-47d9-a423-a48cc1d07716-kube-api-access-vskd6\") pod \"crc-debug-d7zqx\" (UID: \"b791ecd2-3990-47d9-a423-a48cc1d07716\") " pod="openshift-must-gather-pvr4m/crc-debug-d7zqx" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.391390 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b791ecd2-3990-47d9-a423-a48cc1d07716-host\") pod \"crc-debug-d7zqx\" (UID: \"b791ecd2-3990-47d9-a423-a48cc1d07716\") " pod="openshift-must-gather-pvr4m/crc-debug-d7zqx" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.391596 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vskd6\" (UniqueName: \"kubernetes.io/projected/b791ecd2-3990-47d9-a423-a48cc1d07716-kube-api-access-vskd6\") pod \"crc-debug-d7zqx\" (UID: \"b791ecd2-3990-47d9-a423-a48cc1d07716\") " pod="openshift-must-gather-pvr4m/crc-debug-d7zqx" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.391613 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b791ecd2-3990-47d9-a423-a48cc1d07716-host\") pod \"crc-debug-d7zqx\" (UID: \"b791ecd2-3990-47d9-a423-a48cc1d07716\") " pod="openshift-must-gather-pvr4m/crc-debug-d7zqx" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.407920 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vskd6\" (UniqueName: \"kubernetes.io/projected/b791ecd2-3990-47d9-a423-a48cc1d07716-kube-api-access-vskd6\") pod \"crc-debug-d7zqx\" (UID: \"b791ecd2-3990-47d9-a423-a48cc1d07716\") " pod="openshift-must-gather-pvr4m/crc-debug-d7zqx" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 
11:38:10.444995 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/crc-debug-d7zqx" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.467328 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3afd8565-c6ca-44fe-b758-696cc92eab90" path="/var/lib/kubelet/pods/3afd8565-c6ca-44fe-b758-696cc92eab90/volumes" Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.839023 4938 generic.go:334] "Generic (PLEG): container finished" podID="b791ecd2-3990-47d9-a423-a48cc1d07716" containerID="3212189e83402526a57291320c1a7c2ceb727f354a79a319b6bbe64805eeab40" exitCode=0 Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.839124 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pvr4m/crc-debug-d7zqx" event={"ID":"b791ecd2-3990-47d9-a423-a48cc1d07716","Type":"ContainerDied","Data":"3212189e83402526a57291320c1a7c2ceb727f354a79a319b6bbe64805eeab40"} Nov 22 11:38:10 crc kubenswrapper[4938]: I1122 11:38:10.839582 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pvr4m/crc-debug-d7zqx" event={"ID":"b791ecd2-3990-47d9-a423-a48cc1d07716","Type":"ContainerStarted","Data":"2d222c90b549b7eb35151e0e47015e8cc0999b82e4837a8d3079455ff77ec47f"} Nov 22 11:38:11 crc kubenswrapper[4938]: I1122 11:38:11.300463 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:38:11 crc kubenswrapper[4938]: I1122 11:38:11.300517 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:38:11 crc kubenswrapper[4938]: I1122 11:38:11.300561 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 11:38:11 crc kubenswrapper[4938]: I1122 11:38:11.301282 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:38:11 crc kubenswrapper[4938]: I1122 11:38:11.301341 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af" gracePeriod=600 Nov 22 11:38:11 crc kubenswrapper[4938]: I1122 11:38:11.334944 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pvr4m/crc-debug-d7zqx"] Nov 22 11:38:11 crc kubenswrapper[4938]: I1122 11:38:11.341827 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pvr4m/crc-debug-d7zqx"] Nov 22 11:38:11 crc kubenswrapper[4938]: E1122 11:38:11.439566 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:38:11 crc kubenswrapper[4938]: I1122 11:38:11.850887 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af" exitCode=0 Nov 22 11:38:11 crc kubenswrapper[4938]: I1122 11:38:11.850950 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"} Nov 22 11:38:11 crc kubenswrapper[4938]: I1122 11:38:11.851306 4938 scope.go:117] "RemoveContainer" containerID="64fb1ce22b45d5e7269baebd6eedc2cc244304808c797e19cbc721df85fd1dad" Nov 22 11:38:11 crc kubenswrapper[4938]: I1122 11:38:11.853823 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af" Nov 22 11:38:11 crc kubenswrapper[4938]: E1122 11:38:11.854708 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:38:11 crc kubenswrapper[4938]: I1122 11:38:11.964564 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/crc-debug-d7zqx" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.134859 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vskd6\" (UniqueName: \"kubernetes.io/projected/b791ecd2-3990-47d9-a423-a48cc1d07716-kube-api-access-vskd6\") pod \"b791ecd2-3990-47d9-a423-a48cc1d07716\" (UID: \"b791ecd2-3990-47d9-a423-a48cc1d07716\") " Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.134925 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b791ecd2-3990-47d9-a423-a48cc1d07716-host\") pod \"b791ecd2-3990-47d9-a423-a48cc1d07716\" (UID: \"b791ecd2-3990-47d9-a423-a48cc1d07716\") " Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.135158 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b791ecd2-3990-47d9-a423-a48cc1d07716-host" (OuterVolumeSpecName: "host") pod "b791ecd2-3990-47d9-a423-a48cc1d07716" (UID: "b791ecd2-3990-47d9-a423-a48cc1d07716"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.135423 4938 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b791ecd2-3990-47d9-a423-a48cc1d07716-host\") on node \"crc\" DevicePath \"\"" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.142047 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b791ecd2-3990-47d9-a423-a48cc1d07716-kube-api-access-vskd6" (OuterVolumeSpecName: "kube-api-access-vskd6") pod "b791ecd2-3990-47d9-a423-a48cc1d07716" (UID: "b791ecd2-3990-47d9-a423-a48cc1d07716"). InnerVolumeSpecName "kube-api-access-vskd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.236996 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vskd6\" (UniqueName: \"kubernetes.io/projected/b791ecd2-3990-47d9-a423-a48cc1d07716-kube-api-access-vskd6\") on node \"crc\" DevicePath \"\"" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.458449 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b791ecd2-3990-47d9-a423-a48cc1d07716" path="/var/lib/kubelet/pods/b791ecd2-3990-47d9-a423-a48cc1d07716/volumes" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.499288 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pvr4m/crc-debug-2qzs9"] Nov 22 11:38:12 crc kubenswrapper[4938]: E1122 11:38:12.499738 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b791ecd2-3990-47d9-a423-a48cc1d07716" containerName="container-00" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.499761 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="b791ecd2-3990-47d9-a423-a48cc1d07716" containerName="container-00" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.500020 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="b791ecd2-3990-47d9-a423-a48cc1d07716" containerName="container-00" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.500628 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pvr4m/crc-debug-2qzs9" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.645026 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vczzl\" (UniqueName: \"kubernetes.io/projected/469adffd-bf9a-4c95-8b76-c67f7740f3c0-kube-api-access-vczzl\") pod \"crc-debug-2qzs9\" (UID: \"469adffd-bf9a-4c95-8b76-c67f7740f3c0\") " pod="openshift-must-gather-pvr4m/crc-debug-2qzs9" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.645105 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/469adffd-bf9a-4c95-8b76-c67f7740f3c0-host\") pod \"crc-debug-2qzs9\" (UID: \"469adffd-bf9a-4c95-8b76-c67f7740f3c0\") " pod="openshift-must-gather-pvr4m/crc-debug-2qzs9" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.747103 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/469adffd-bf9a-4c95-8b76-c67f7740f3c0-host\") pod \"crc-debug-2qzs9\" (UID: \"469adffd-bf9a-4c95-8b76-c67f7740f3c0\") " pod="openshift-must-gather-pvr4m/crc-debug-2qzs9" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.747212 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/469adffd-bf9a-4c95-8b76-c67f7740f3c0-host\") pod \"crc-debug-2qzs9\" (UID: \"469adffd-bf9a-4c95-8b76-c67f7740f3c0\") " pod="openshift-must-gather-pvr4m/crc-debug-2qzs9" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.747629 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vczzl\" (UniqueName: \"kubernetes.io/projected/469adffd-bf9a-4c95-8b76-c67f7740f3c0-kube-api-access-vczzl\") pod \"crc-debug-2qzs9\" (UID: \"469adffd-bf9a-4c95-8b76-c67f7740f3c0\") " pod="openshift-must-gather-pvr4m/crc-debug-2qzs9" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.765856 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vczzl\" (UniqueName: \"kubernetes.io/projected/469adffd-bf9a-4c95-8b76-c67f7740f3c0-kube-api-access-vczzl\") pod \"crc-debug-2qzs9\" (UID: \"469adffd-bf9a-4c95-8b76-c67f7740f3c0\") " pod="openshift-must-gather-pvr4m/crc-debug-2qzs9" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.822250 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/crc-debug-2qzs9" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.860254 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pvr4m/crc-debug-d7zqx" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.860260 4938 scope.go:117] "RemoveContainer" containerID="3212189e83402526a57291320c1a7c2ceb727f354a79a319b6bbe64805eeab40" Nov 22 11:38:12 crc kubenswrapper[4938]: I1122 11:38:12.861278 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pvr4m/crc-debug-2qzs9" event={"ID":"469adffd-bf9a-4c95-8b76-c67f7740f3c0","Type":"ContainerStarted","Data":"5455f9b58010d62b84fd775c8e8cc5a07f739e1f19de7916609b2cf1719d9e85"} Nov 22 11:38:13 crc kubenswrapper[4938]: I1122 11:38:13.872855 4938 generic.go:334] "Generic (PLEG): container finished" podID="469adffd-bf9a-4c95-8b76-c67f7740f3c0" containerID="3534eb9591dc3ed616473ff2e19ec05eb9aad5ac03c6fb2850705a9906ac8856" exitCode=0 Nov 22 11:38:13 crc kubenswrapper[4938]: I1122 11:38:13.872932 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pvr4m/crc-debug-2qzs9" event={"ID":"469adffd-bf9a-4c95-8b76-c67f7740f3c0","Type":"ContainerDied","Data":"3534eb9591dc3ed616473ff2e19ec05eb9aad5ac03c6fb2850705a9906ac8856"} Nov 22 11:38:13 crc kubenswrapper[4938]: I1122 11:38:13.911523 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pvr4m/crc-debug-2qzs9"] Nov 22 11:38:13 crc kubenswrapper[4938]: I1122 11:38:13.920856 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pvr4m/crc-debug-2qzs9"] Nov 22 11:38:15 crc kubenswrapper[4938]: I1122 11:38:15.009813 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/crc-debug-2qzs9" Nov 22 11:38:15 crc kubenswrapper[4938]: I1122 11:38:15.095883 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vczzl\" (UniqueName: \"kubernetes.io/projected/469adffd-bf9a-4c95-8b76-c67f7740f3c0-kube-api-access-vczzl\") pod \"469adffd-bf9a-4c95-8b76-c67f7740f3c0\" (UID: \"469adffd-bf9a-4c95-8b76-c67f7740f3c0\") " Nov 22 11:38:15 crc kubenswrapper[4938]: I1122 11:38:15.096301 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/469adffd-bf9a-4c95-8b76-c67f7740f3c0-host\") pod \"469adffd-bf9a-4c95-8b76-c67f7740f3c0\" (UID: \"469adffd-bf9a-4c95-8b76-c67f7740f3c0\") " Nov 22 11:38:15 crc kubenswrapper[4938]: I1122 11:38:15.096425 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/469adffd-bf9a-4c95-8b76-c67f7740f3c0-host" (OuterVolumeSpecName: "host") pod "469adffd-bf9a-4c95-8b76-c67f7740f3c0" (UID: "469adffd-bf9a-4c95-8b76-c67f7740f3c0"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 11:38:15 crc kubenswrapper[4938]: I1122 11:38:15.097061 4938 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/469adffd-bf9a-4c95-8b76-c67f7740f3c0-host\") on node \"crc\" DevicePath \"\"" Nov 22 11:38:15 crc kubenswrapper[4938]: I1122 11:38:15.103193 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/469adffd-bf9a-4c95-8b76-c67f7740f3c0-kube-api-access-vczzl" (OuterVolumeSpecName: "kube-api-access-vczzl") pod "469adffd-bf9a-4c95-8b76-c67f7740f3c0" (UID: "469adffd-bf9a-4c95-8b76-c67f7740f3c0"). InnerVolumeSpecName "kube-api-access-vczzl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:38:15 crc kubenswrapper[4938]: I1122 11:38:15.198461 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vczzl\" (UniqueName: \"kubernetes.io/projected/469adffd-bf9a-4c95-8b76-c67f7740f3c0-kube-api-access-vczzl\") on node \"crc\" DevicePath \"\"" Nov 22 11:38:15 crc kubenswrapper[4938]: I1122 11:38:15.893583 4938 scope.go:117] "RemoveContainer" containerID="3534eb9591dc3ed616473ff2e19ec05eb9aad5ac03c6fb2850705a9906ac8856" Nov 22 11:38:15 crc kubenswrapper[4938]: I1122 11:38:15.893639 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/crc-debug-2qzs9" Nov 22 11:38:16 crc kubenswrapper[4938]: I1122 11:38:16.464031 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="469adffd-bf9a-4c95-8b76-c67f7740f3c0" path="/var/lib/kubelet/pods/469adffd-bf9a-4c95-8b76-c67f7740f3c0/volumes" Nov 22 11:38:23 crc kubenswrapper[4938]: I1122 11:38:23.447764 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af" Nov 22 11:38:23 crc kubenswrapper[4938]: E1122 11:38:23.448569 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:38:29 crc kubenswrapper[4938]: I1122 11:38:29.646631 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5cbdfff8c8-z9wrl_86ceb17d-9778-45f9-a75e-ed96d5abe722/barbican-api/0.log" Nov 22 11:38:29 crc kubenswrapper[4938]: I1122 11:38:29.699711 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5cbdfff8c8-z9wrl_86ceb17d-9778-45f9-a75e-ed96d5abe722/barbican-api-log/0.log" Nov 22 11:38:29 crc kubenswrapper[4938]: I1122 11:38:29.837981 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6546d544c4-wcfbr_0d27fa20-5741-49e1-a69c-5f3c856bea32/barbican-keystone-listener/0.log" Nov 22 11:38:29 crc kubenswrapper[4938]: I1122 11:38:29.871559 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6546d544c4-wcfbr_0d27fa20-5741-49e1-a69c-5f3c856bea32/barbican-keystone-listener-log/0.log" Nov 22 11:38:29 crc kubenswrapper[4938]: I1122 11:38:29.916978 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6865f54775-4zkqs_796676ca-fdb7-4ac2-9092-73d2ac3ac760/barbican-worker/0.log" Nov 22 11:38:30 crc kubenswrapper[4938]: I1122 11:38:30.018845 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6865f54775-4zkqs_796676ca-fdb7-4ac2-9092-73d2ac3ac760/barbican-worker-log/0.log" Nov 22 11:38:30 crc kubenswrapper[4938]: I1122 11:38:30.160421 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8_a53e09b5-739a-427e-b8f4-48fd612e9b07/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:30 crc kubenswrapper[4938]: I1122 11:38:30.201204 4938 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ceilometer-0_30d989f4-c834-49a2-aeaf-6478a2318852/ceilometer-central-agent/0.log" Nov 22 11:38:30 crc kubenswrapper[4938]: I1122 11:38:30.355726 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_30d989f4-c834-49a2-aeaf-6478a2318852/ceilometer-notification-agent/0.log" Nov 22 11:38:30 crc kubenswrapper[4938]: I1122 11:38:30.361827 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_30d989f4-c834-49a2-aeaf-6478a2318852/proxy-httpd/0.log" Nov 22 11:38:30 crc kubenswrapper[4938]: I1122 11:38:30.395000 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_30d989f4-c834-49a2-aeaf-6478a2318852/sg-core/0.log" Nov 22 11:38:30 crc kubenswrapper[4938]: I1122 11:38:30.569682 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_2048ffcd-1faf-44c6-a1e6-425501f44282/cinder-api-log/0.log" Nov 22 11:38:30 crc kubenswrapper[4938]: I1122 11:38:30.606283 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_2048ffcd-1faf-44c6-a1e6-425501f44282/cinder-api/0.log" Nov 22 11:38:30 crc kubenswrapper[4938]: I1122 11:38:30.695589 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_4c5c1e05-59a2-49d5-9bbc-315dc537b994/cinder-scheduler/0.log" Nov 22 11:38:30 crc kubenswrapper[4938]: I1122 11:38:30.781159 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_4c5c1e05-59a2-49d5-9bbc-315dc537b994/probe/0.log" Nov 22 11:38:30 crc kubenswrapper[4938]: I1122 11:38:30.839196 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w_66ca7351-72c9-401f-8602-f7a34033d228/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:31 crc kubenswrapper[4938]: I1122 11:38:31.002052 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz_85b362e8-25b1-4ed4-8c6c-8fdb1c84e296/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:31 crc kubenswrapper[4938]: I1122 11:38:31.103539 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-qqnx6_7b526fbb-fe31-4192-8756-67eaea9b813d/init/0.log" Nov 22 11:38:31 crc kubenswrapper[4938]: I1122 11:38:31.285439 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-qqnx6_7b526fbb-fe31-4192-8756-67eaea9b813d/init/0.log" Nov 22 11:38:31 crc kubenswrapper[4938]: I1122 11:38:31.363839 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-98krt_073859e3-9fc9-45e3-a311-34411cea1556/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:31 crc kubenswrapper[4938]: I1122 11:38:31.383755 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-qqnx6_7b526fbb-fe31-4192-8756-67eaea9b813d/dnsmasq-dns/0.log" Nov 22 11:38:31 crc kubenswrapper[4938]: I1122 11:38:31.529171 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_daf50ab9-a17b-4d53-a2f5-a1f11ed8455e/glance-httpd/0.log" Nov 22 11:38:31 crc kubenswrapper[4938]: I1122 11:38:31.614527 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_daf50ab9-a17b-4d53-a2f5-a1f11ed8455e/glance-log/0.log" Nov 22 
11:38:31 crc kubenswrapper[4938]: I1122 11:38:31.721744 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_8585da6c-5a29-4a79-9aa4-5385381dfd08/glance-httpd/0.log" Nov 22 11:38:31 crc kubenswrapper[4938]: I1122 11:38:31.728642 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_8585da6c-5a29-4a79-9aa4-5385381dfd08/glance-log/0.log" Nov 22 11:38:31 crc kubenswrapper[4938]: I1122 11:38:31.933802 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7bb4f8b4bd-qj489_52d01853-e609-4339-a336-78e1b9f4f704/horizon/0.log" Nov 22 11:38:32 crc kubenswrapper[4938]: I1122 11:38:32.065111 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-rthm2_71e3a6a6-d91c-416c-9ec6-43429dd10097/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:32 crc kubenswrapper[4938]: I1122 11:38:32.246990 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7bb4f8b4bd-qj489_52d01853-e609-4339-a336-78e1b9f4f704/horizon-log/0.log" Nov 22 11:38:32 crc kubenswrapper[4938]: I1122 11:38:32.310771 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-255m5_578cbfd4-2f90-4d71-ac4f-ccdb9f00629f/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:32 crc kubenswrapper[4938]: I1122 11:38:32.524865 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6bf8df76c8-5c2xm_26b58116-00b3-49d5-bf76-d262754d9cfb/keystone-api/0.log" Nov 22 11:38:32 crc kubenswrapper[4938]: I1122 11:38:32.575454 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29396821-fw2vs_ad31137e-3dac-4a06-9b17-e54340147400/keystone-cron/0.log" Nov 22 11:38:32 crc kubenswrapper[4938]: I1122 11:38:32.859982 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_190d6459-e173-4817-a60a-b204a9a4bf68/kube-state-metrics/0.log" Nov 22 11:38:32 crc kubenswrapper[4938]: I1122 11:38:32.911172 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-gcd75_6b52293d-9695-46ab-8248-af8bb1a3c464/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:33 crc kubenswrapper[4938]: I1122 11:38:33.409353 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7768cc7979-hrkwz_c4f7f822-da01-4216-9e4c-5ee8a9aa8495/neutron-httpd/0.log" Nov 22 11:38:33 crc kubenswrapper[4938]: I1122 11:38:33.411698 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7768cc7979-hrkwz_c4f7f822-da01-4216-9e4c-5ee8a9aa8495/neutron-api/0.log" Nov 22 11:38:33 crc kubenswrapper[4938]: I1122 11:38:33.437369 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9_2dc63cc5-838b-4bdf-86fe-46ede44788b3/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:33 crc kubenswrapper[4938]: I1122 11:38:33.924297 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5b5cb627-ee85-42aa-95e9-ece522c218a4/nova-api-log/0.log" Nov 22 11:38:34 crc kubenswrapper[4938]: I1122 11:38:34.016371 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_59fde2f8-5c0c-44ab-a02b-87115af94dc0/nova-cell0-conductor-conductor/0.log" Nov 
22 11:38:34 crc kubenswrapper[4938]: I1122 11:38:34.230930 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5b5cb627-ee85-42aa-95e9-ece522c218a4/nova-api-api/0.log" Nov 22 11:38:34 crc kubenswrapper[4938]: I1122 11:38:34.325538 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_6eb58a07-3ce8-43b4-a75f-a883df1d1e02/nova-cell1-conductor-conductor/0.log" Nov 22 11:38:34 crc kubenswrapper[4938]: I1122 11:38:34.348270 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_d4c78748-ba52-4ebe-b136-07c4f0d939df/nova-cell1-novncproxy-novncproxy/0.log" Nov 22 11:38:34 crc kubenswrapper[4938]: I1122 11:38:34.494666 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-sjbl5_6b2c795f-d47e-411a-a1c0-f59ed58d9506/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:34 crc kubenswrapper[4938]: I1122 11:38:34.673140 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_209e9603-14ba-4706-87a0-00ea7f2bd737/nova-metadata-log/0.log" Nov 22 11:38:35 crc kubenswrapper[4938]: I1122 11:38:35.016553 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_7fb1a3ef-83a7-467e-9cd0-94310d410729/nova-scheduler-scheduler/0.log" Nov 22 11:38:35 crc kubenswrapper[4938]: I1122 11:38:35.028667 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0404ed3a-da0f-4ba3-953b-e1f3dca9d53b/mysql-bootstrap/0.log" Nov 22 11:38:35 crc kubenswrapper[4938]: I1122 11:38:35.238944 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0404ed3a-da0f-4ba3-953b-e1f3dca9d53b/mysql-bootstrap/0.log" Nov 22 11:38:35 crc kubenswrapper[4938]: I1122 11:38:35.244681 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0404ed3a-da0f-4ba3-953b-e1f3dca9d53b/galera/0.log" Nov 22 11:38:35 crc kubenswrapper[4938]: I1122 11:38:35.448230 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af" Nov 22 11:38:35 crc kubenswrapper[4938]: E1122 11:38:35.448578 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:38:35 crc kubenswrapper[4938]: I1122 11:38:35.491551 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1e2caea9-4690-48c1-909a-05ba8dbf34d4/mysql-bootstrap/0.log" Nov 22 11:38:35 crc kubenswrapper[4938]: I1122 11:38:35.628408 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1e2caea9-4690-48c1-909a-05ba8dbf34d4/mysql-bootstrap/0.log" Nov 22 11:38:35 crc kubenswrapper[4938]: I1122 11:38:35.640207 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1e2caea9-4690-48c1-909a-05ba8dbf34d4/galera/0.log" Nov 22 11:38:35 crc kubenswrapper[4938]: I1122 11:38:35.750723 4938 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-metadata-0_209e9603-14ba-4706-87a0-00ea7f2bd737/nova-metadata-metadata/0.log" Nov 22 11:38:35 crc kubenswrapper[4938]: I1122 11:38:35.787009 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_cc16df53-2254-4dc7-8914-88afcbc0b5c4/openstackclient/0.log" Nov 22 11:38:36 crc kubenswrapper[4938]: I1122 11:38:36.015219 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dg79z_62cfd59e-5bd6-48ef-9990-1f29ec6d155a/ovsdb-server-init/0.log" Nov 22 11:38:36 crc kubenswrapper[4938]: I1122 11:38:36.016665 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-48rkq_a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7/openstack-network-exporter/0.log" Nov 22 11:38:36 crc kubenswrapper[4938]: I1122 11:38:36.229877 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dg79z_62cfd59e-5bd6-48ef-9990-1f29ec6d155a/ovsdb-server-init/0.log" Nov 22 11:38:36 crc kubenswrapper[4938]: I1122 11:38:36.236406 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dg79z_62cfd59e-5bd6-48ef-9990-1f29ec6d155a/ovs-vswitchd/0.log" Nov 22 11:38:36 crc kubenswrapper[4938]: I1122 11:38:36.282135 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dg79z_62cfd59e-5bd6-48ef-9990-1f29ec6d155a/ovsdb-server/0.log" Nov 22 11:38:36 crc kubenswrapper[4938]: I1122 11:38:36.658370 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-sq62w_a2ffb7f9-f83c-4e71-af53-3d116e260d8e/ovn-controller/0.log" Nov 22 11:38:36 crc kubenswrapper[4938]: I1122 11:38:36.715292 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-wn6rr_0c6fb3fe-7488-44ba-a5fc-d24f04f40dec/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:36 crc kubenswrapper[4938]: I1122 11:38:36.912029 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_05abbe70-d68c-4b0b-a6a3-580b764f3014/ovn-northd/0.log" Nov 22 11:38:36 crc kubenswrapper[4938]: I1122 11:38:36.915153 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_05abbe70-d68c-4b0b-a6a3-580b764f3014/openstack-network-exporter/0.log" Nov 22 11:38:37 crc kubenswrapper[4938]: I1122 11:38:37.074596 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_0e21a120-bf8c-43ad-b7fe-48e11f0a0545/openstack-network-exporter/0.log" Nov 22 11:38:37 crc kubenswrapper[4938]: I1122 11:38:37.121414 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_0e21a120-bf8c-43ad-b7fe-48e11f0a0545/ovsdbserver-nb/0.log" Nov 22 11:38:37 crc kubenswrapper[4938]: I1122 11:38:37.246048 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e7f61b47-1155-4776-acf0-0cb9ea53af1a/openstack-network-exporter/0.log" Nov 22 11:38:37 crc kubenswrapper[4938]: I1122 11:38:37.334681 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e7f61b47-1155-4776-acf0-0cb9ea53af1a/ovsdbserver-sb/0.log" Nov 22 11:38:37 crc kubenswrapper[4938]: I1122 11:38:37.510777 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5d64bdcd96-8b525_70cbbc4a-ed36-471c-9b65-5eea9fc87891/placement-log/0.log" Nov 22 11:38:37 crc kubenswrapper[4938]: I1122 11:38:37.540052 4938 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_placement-5d64bdcd96-8b525_70cbbc4a-ed36-471c-9b65-5eea9fc87891/placement-api/0.log" Nov 22 11:38:37 crc kubenswrapper[4938]: I1122 11:38:37.656185 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_fc5cb0aa-c3a0-436c-b911-6029b94775a8/setup-container/0.log" Nov 22 11:38:37 crc kubenswrapper[4938]: I1122 11:38:37.839071 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_fc5cb0aa-c3a0-436c-b911-6029b94775a8/setup-container/0.log" Nov 22 11:38:37 crc kubenswrapper[4938]: I1122 11:38:37.861588 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_30956ae1-6658-45ca-867e-12fb808394db/setup-container/0.log" Nov 22 11:38:37 crc kubenswrapper[4938]: I1122 11:38:37.892645 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_fc5cb0aa-c3a0-436c-b911-6029b94775a8/rabbitmq/0.log" Nov 22 11:38:38 crc kubenswrapper[4938]: I1122 11:38:38.124061 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_30956ae1-6658-45ca-867e-12fb808394db/setup-container/0.log" Nov 22 11:38:38 crc kubenswrapper[4938]: I1122 11:38:38.196294 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_30956ae1-6658-45ca-867e-12fb808394db/rabbitmq/0.log" Nov 22 11:38:38 crc kubenswrapper[4938]: I1122 11:38:38.196868 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72_82ac5576-d4c3-4bb0-a2f3-2f6da7605821/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:38 crc kubenswrapper[4938]: I1122 11:38:38.351539 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-mbmjk_ad415672-c278-4e60-b205-ff929432c200/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:38 crc kubenswrapper[4938]: I1122 11:38:38.445103 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v_ac9be71f-c722-4d3e-b43a-7dffaa096daf/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:38 crc kubenswrapper[4938]: I1122 11:38:38.622582 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-s94th_146c45e8-f683-48dd-99b4-02d5eeab729d/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:38 crc kubenswrapper[4938]: I1122 11:38:38.694022 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-9895f_a66a985f-423c-4438-b9cc-ad5cbc582077/ssh-known-hosts-edpm-deployment/0.log" Nov 22 11:38:38 crc kubenswrapper[4938]: I1122 11:38:38.912956 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-77b7545b4c-scc96_d1ea1eda-81bc-455d-9f0d-68324fbe5992/proxy-server/0.log" Nov 22 11:38:38 crc kubenswrapper[4938]: I1122 11:38:38.948777 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-77b7545b4c-scc96_d1ea1eda-81bc-455d-9f0d-68324fbe5992/proxy-httpd/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.153404 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-xn65g_48baac61-428d-4d1d-aa99-39c8ca12e251/swift-ring-rebalance/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.217359 4938 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/account-auditor/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.342382 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/account-reaper/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.449407 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/container-auditor/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.454833 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/account-server/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.490045 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/account-replicator/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.578882 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/container-replicator/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.666565 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/container-server/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.688962 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/container-updater/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.715902 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/object-auditor/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.811702 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/object-expirer/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.887489 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/object-server/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.925614 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/object-updater/0.log" Nov 22 11:38:39 crc kubenswrapper[4938]: I1122 11:38:39.965572 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/object-replicator/0.log" Nov 22 11:38:40 crc kubenswrapper[4938]: I1122 11:38:40.006141 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/rsync/0.log" Nov 22 11:38:40 crc kubenswrapper[4938]: I1122 11:38:40.091423 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/swift-recon-cron/0.log" Nov 22 11:38:40 crc kubenswrapper[4938]: I1122 11:38:40.401598 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm_979e4133-a50f-45d0-9eb3-7f684d65c4ce/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:40 crc kubenswrapper[4938]: I1122 11:38:40.493495 4938 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_tempest-tests-tempest_7624b768-90d5-4bad-b97e-21ea6549679a/tempest-tests-tempest-tests-runner/0.log" Nov 22 11:38:40 crc kubenswrapper[4938]: I1122 11:38:40.616136 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_aeda96c8-4659-41c2-947d-f10a1c61bee0/test-operator-logs-container/0.log" Nov 22 11:38:40 crc kubenswrapper[4938]: I1122 11:38:40.702669 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-46d5c_595a1412-b3d2-40ba-bb26-98cd27d79480/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:38:47 crc kubenswrapper[4938]: I1122 11:38:47.392358 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_7e219ab3-870c-4d69-99b6-79758b76a271/memcached/0.log" Nov 22 11:38:50 crc kubenswrapper[4938]: I1122 11:38:50.447512 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af" Nov 22 11:38:50 crc kubenswrapper[4938]: E1122 11:38:50.448284 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.652375 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4n25t"] Nov 22 11:38:57 crc kubenswrapper[4938]: E1122 11:38:57.654576 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="469adffd-bf9a-4c95-8b76-c67f7740f3c0" containerName="container-00" Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.654660 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="469adffd-bf9a-4c95-8b76-c67f7740f3c0" containerName="container-00" Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.654968 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="469adffd-bf9a-4c95-8b76-c67f7740f3c0" containerName="container-00" Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.656360 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.681856 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4n25t"]
Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.755327 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7602e76-9220-4979-b453-422742fd88ab-catalog-content\") pod \"certified-operators-4n25t\" (UID: \"c7602e76-9220-4979-b453-422742fd88ab\") " pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.755661 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7602e76-9220-4979-b453-422742fd88ab-utilities\") pod \"certified-operators-4n25t\" (UID: \"c7602e76-9220-4979-b453-422742fd88ab\") " pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.755758 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wds52\" (UniqueName: \"kubernetes.io/projected/c7602e76-9220-4979-b453-422742fd88ab-kube-api-access-wds52\") pod \"certified-operators-4n25t\" (UID: \"c7602e76-9220-4979-b453-422742fd88ab\") " pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.857651 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7602e76-9220-4979-b453-422742fd88ab-catalog-content\") pod \"certified-operators-4n25t\" (UID: \"c7602e76-9220-4979-b453-422742fd88ab\") " pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.857738 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7602e76-9220-4979-b453-422742fd88ab-utilities\") pod \"certified-operators-4n25t\" (UID: \"c7602e76-9220-4979-b453-422742fd88ab\") " pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.857757 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wds52\" (UniqueName: \"kubernetes.io/projected/c7602e76-9220-4979-b453-422742fd88ab-kube-api-access-wds52\") pod \"certified-operators-4n25t\" (UID: \"c7602e76-9220-4979-b453-422742fd88ab\") " pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.858267 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7602e76-9220-4979-b453-422742fd88ab-catalog-content\") pod \"certified-operators-4n25t\" (UID: \"c7602e76-9220-4979-b453-422742fd88ab\") " pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.858295 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7602e76-9220-4979-b453-422742fd88ab-utilities\") pod \"certified-operators-4n25t\" (UID: \"c7602e76-9220-4979-b453-422742fd88ab\") " pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.886612 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wds52\" (UniqueName: \"kubernetes.io/projected/c7602e76-9220-4979-b453-422742fd88ab-kube-api-access-wds52\") pod \"certified-operators-4n25t\" (UID: \"c7602e76-9220-4979-b453-422742fd88ab\") " pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:38:57 crc kubenswrapper[4938]: I1122 11:38:57.977445 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:38:58 crc kubenswrapper[4938]: I1122 11:38:58.554688 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4n25t"]
Nov 22 11:38:59 crc kubenswrapper[4938]: I1122 11:38:59.275023 4938 generic.go:334] "Generic (PLEG): container finished" podID="c7602e76-9220-4979-b453-422742fd88ab" containerID="70e81c7b363b26b6f02e675f1155499db2b192afcbcafd9c95ef14e261a64ab2" exitCode=0
Nov 22 11:38:59 crc kubenswrapper[4938]: I1122 11:38:59.275135 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n25t" event={"ID":"c7602e76-9220-4979-b453-422742fd88ab","Type":"ContainerDied","Data":"70e81c7b363b26b6f02e675f1155499db2b192afcbcafd9c95ef14e261a64ab2"}
Nov 22 11:38:59 crc kubenswrapper[4938]: I1122 11:38:59.275303 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n25t" event={"ID":"c7602e76-9220-4979-b453-422742fd88ab","Type":"ContainerStarted","Data":"51a8e4efb04133c1b1f02098e663d4dd07cc9e10df36e6a3a75ad2dc940add74"}
Nov 22 11:39:00 crc kubenswrapper[4938]: I1122 11:39:00.291392 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n25t" event={"ID":"c7602e76-9220-4979-b453-422742fd88ab","Type":"ContainerStarted","Data":"d6512e543d950d77ab6dfd50c29e69c99ea93e98e4d5f46f3e483a47b6217ab9"}
Nov 22 11:39:01 crc kubenswrapper[4938]: I1122 11:39:01.302594 4938 generic.go:334] "Generic (PLEG): container finished" podID="c7602e76-9220-4979-b453-422742fd88ab" containerID="d6512e543d950d77ab6dfd50c29e69c99ea93e98e4d5f46f3e483a47b6217ab9" exitCode=0
Nov 22 11:39:01 crc kubenswrapper[4938]: I1122 11:39:01.302653 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n25t" event={"ID":"c7602e76-9220-4979-b453-422742fd88ab","Type":"ContainerDied","Data":"d6512e543d950d77ab6dfd50c29e69c99ea93e98e4d5f46f3e483a47b6217ab9"}
Nov 22 11:39:02 crc kubenswrapper[4938]: I1122 11:39:02.314670 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n25t" event={"ID":"c7602e76-9220-4979-b453-422742fd88ab","Type":"ContainerStarted","Data":"652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f"}
Nov 22 11:39:02 crc kubenswrapper[4938]: I1122 11:39:02.346153 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4n25t" podStartSLOduration=2.867230093 podStartE2EDuration="5.346128913s" podCreationTimestamp="2025-11-22 11:38:57 +0000 UTC" firstStartedPulling="2025-11-22 11:38:59.276896924 +0000 UTC m=+3671.744734323" lastFinishedPulling="2025-11-22 11:39:01.755795744 +0000 UTC m=+3674.223633143" observedRunningTime="2025-11-22 11:39:02.335585382 +0000 UTC m=+3674.803422781" watchObservedRunningTime="2025-11-22 11:39:02.346128913 +0000 UTC m=+3674.813966312"
Nov 22 11:39:03 crc kubenswrapper[4938]: I1122 11:39:03.838146 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/util/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.033121 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/pull/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.059832 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/pull/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.067808 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/util/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.237959 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/pull/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.262645 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/util/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.272317 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/extract/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.490126 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-mrpj9_dde3bedc-34b4-41e0-adba-78c802591de5/kube-rbac-proxy/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.513110 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-mrpj9_dde3bedc-34b4-41e0-adba-78c802591de5/manager/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.534553 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-jhgfc_cba844df-58bc-4d1e-989c-9eb4ccb036b6/kube-rbac-proxy/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.713104 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-jhgfc_cba844df-58bc-4d1e-989c-9eb4ccb036b6/manager/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.714140 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-8b8mj_2415b98c-1a50-4f8d-b094-de51a90a0088/kube-rbac-proxy/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.741419 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-8b8mj_2415b98c-1a50-4f8d-b094-de51a90a0088/manager/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.930157 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-6f95d84fd6-s2d7b_a8e3c6f8-4a77-4180-a67b-3dab37169c07/kube-rbac-proxy/0.log"
Nov 22 11:39:04 crc kubenswrapper[4938]: I1122 11:39:04.989137 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-6f95d84fd6-s2d7b_a8e3c6f8-4a77-4180-a67b-3dab37169c07/manager/0.log"
Nov 22 11:39:05 crc kubenswrapper[4938]: I1122 11:39:05.101496 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-j8ftc_496d9a6a-3979-43af-aa47-9161506bc8e9/kube-rbac-proxy/0.log"
Nov 22 11:39:05 crc kubenswrapper[4938]: I1122 11:39:05.135772 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-j8ftc_496d9a6a-3979-43af-aa47-9161506bc8e9/manager/0.log"
Nov 22 11:39:05 crc kubenswrapper[4938]: I1122 11:39:05.172734 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-r9lgr_8659e26c-11d1-4a24-82e3-42e9737a54b8/kube-rbac-proxy/0.log"
Nov 22 11:39:05 crc kubenswrapper[4938]: I1122 11:39:05.362390 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-r9lgr_8659e26c-11d1-4a24-82e3-42e9737a54b8/manager/0.log"
Nov 22 11:39:05 crc kubenswrapper[4938]: I1122 11:39:05.366517 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-z8ksz_9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95/kube-rbac-proxy/0.log"
Nov 22 11:39:05 crc kubenswrapper[4938]: I1122 11:39:05.447169 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:39:05 crc kubenswrapper[4938]: E1122 11:39:05.447455 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:39:05 crc kubenswrapper[4938]: I1122 11:39:05.558627 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-tb846_2a30b9b0-97ac-4268-8d85-193fa80c6b01/kube-rbac-proxy/0.log"
Nov 22 11:39:05 crc kubenswrapper[4938]: I1122 11:39:05.581766 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-z8ksz_9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95/manager/0.log"
Nov 22 11:39:05 crc kubenswrapper[4938]: I1122 11:39:05.596833 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-tb846_2a30b9b0-97ac-4268-8d85-193fa80c6b01/manager/0.log"
Nov 22 11:39:05 crc kubenswrapper[4938]: I1122 11:39:05.806738 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-gxvrt_65118a1f-ed5e-4354-8494-4df42ff6ae6a/kube-rbac-proxy/0.log"
Nov 22 11:39:05 crc kubenswrapper[4938]: I1122 11:39:05.910854 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-gxvrt_65118a1f-ed5e-4354-8494-4df42ff6ae6a/manager/0.log"
Nov 22 11:39:05 crc kubenswrapper[4938]: I1122 11:39:05.949045 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-646fd589f9-gc4lw_fd298c00-9118-413b-bce4-1198393538fa/kube-rbac-proxy/0.log"
Nov 22 11:39:06 crc kubenswrapper[4938]: I1122 11:39:06.009705 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-646fd589f9-gc4lw_fd298c00-9118-413b-bce4-1198393538fa/manager/0.log"
Nov 22 11:39:06 crc kubenswrapper[4938]: I1122 11:39:06.121225 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-qq5ww_c933df24-871e-4075-b48f-f8903914716b/kube-rbac-proxy/0.log"
Nov 22 11:39:06 crc kubenswrapper[4938]: I1122 11:39:06.149876 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-qq5ww_c933df24-871e-4075-b48f-f8903914716b/manager/0.log"
Nov 22 11:39:06 crc kubenswrapper[4938]: I1122 11:39:06.278674 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-xjw2q_46844239-10fa-433c-bd82-565bf911989c/kube-rbac-proxy/0.log"
Nov 22 11:39:06 crc kubenswrapper[4938]: I1122 11:39:06.468869 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-dzpvv_ca599052-ab51-498f-882d-895854e272c4/kube-rbac-proxy/0.log"
Nov 22 11:39:06 crc kubenswrapper[4938]: I1122 11:39:06.545518 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-dzpvv_ca599052-ab51-498f-882d-895854e272c4/manager/0.log"
Nov 22 11:39:06 crc kubenswrapper[4938]: I1122 11:39:06.573060 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-xjw2q_46844239-10fa-433c-bd82-565bf911989c/manager/0.log"
Nov 22 11:39:06 crc kubenswrapper[4938]: I1122 11:39:06.670214 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-qps9n_1016b115-4617-4a19-a992-91dd5b124c9b/kube-rbac-proxy/0.log"
Nov 22 11:39:06 crc kubenswrapper[4938]: I1122 11:39:06.736287 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-qps9n_1016b115-4617-4a19-a992-91dd5b124c9b/manager/0.log"
Nov 22 11:39:06 crc kubenswrapper[4938]: I1122 11:39:06.867623 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-2l2nm_7e8f66c5-67cb-428e-bc4d-9e6e893af682/kube-rbac-proxy/0.log"
Nov 22 11:39:06 crc kubenswrapper[4938]: I1122 11:39:06.901660 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-2l2nm_7e8f66c5-67cb-428e-bc4d-9e6e893af682/manager/0.log"
Nov 22 11:39:07 crc kubenswrapper[4938]: I1122 11:39:07.066430 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7f4bc68b84-bqqvf_7fc10dd9-2ded-4a21-badc-6e8bd9615dd1/kube-rbac-proxy/0.log"
Nov 22 11:39:07 crc kubenswrapper[4938]: I1122 11:39:07.212391 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6d45d44995-nrhm2_f4501035-7ea2-41f7-a3d4-12ab72d52a0c/kube-rbac-proxy/0.log"
Nov 22 11:39:07 crc kubenswrapper[4938]: I1122 11:39:07.469860 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6d45d44995-nrhm2_f4501035-7ea2-41f7-a3d4-12ab72d52a0c/operator/0.log"
Nov 22 11:39:07 crc kubenswrapper[4938]: I1122 11:39:07.470038 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-j4l4j_122aa2ac-8dc3-4698-818b-120126fb039b/registry-server/0.log"
Nov 22 11:39:07 crc kubenswrapper[4938]: I1122 11:39:07.672644 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-dx2bq_ee7a691c-6232-4e30-b1bf-400c65b8b127/kube-rbac-proxy/0.log"
Nov 22 11:39:07 crc kubenswrapper[4938]: I1122 11:39:07.765434 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-dx2bq_ee7a691c-6232-4e30-b1bf-400c65b8b127/manager/0.log"
Nov 22 11:39:07 crc kubenswrapper[4938]: I1122 11:39:07.977662 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:39:07 crc kubenswrapper[4938]: I1122 11:39:07.978249 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.035255 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.089330 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-98nqr_454affdc-b63c-4696-914f-f2abbf7896ca/kube-rbac-proxy/0.log"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.126135 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-98nqr_454affdc-b63c-4696-914f-f2abbf7896ca/manager/0.log"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.177361 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7f4bc68b84-bqqvf_7fc10dd9-2ded-4a21-badc-6e8bd9615dd1/manager/0.log"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.284665 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p_27df649b-2572-42d7-a137-6a82a01c482a/operator/0.log"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.314768 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-5r8n9_6ceba7c3-c04c-4449-9788-ed341bdaceb7/kube-rbac-proxy/0.log"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.394217 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-5r8n9_6ceba7c3-c04c-4449-9788-ed341bdaceb7/manager/0.log"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.411113 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.458930 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4n25t"]
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.521964 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-695797c565-v457z_dbade39a-90d4-49d8-96cc-0a5175783ac1/kube-rbac-proxy/0.log"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.571361 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-695797c565-v457z_dbade39a-90d4-49d8-96cc-0a5175783ac1/manager/0.log"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.616856 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-77db6bf9c-s5pww_584df814-c2c1-4566-a8d0-930b14020095/kube-rbac-proxy/0.log"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.646705 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-77db6bf9c-s5pww_584df814-c2c1-4566-a8d0-930b14020095/manager/0.log"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.720422 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-4rmnr_98d39c17-a9b0-483d-b170-eb006b5ee4b9/kube-rbac-proxy/0.log"
Nov 22 11:39:08 crc kubenswrapper[4938]: I1122 11:39:08.772343 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-4rmnr_98d39c17-a9b0-483d-b170-eb006b5ee4b9/manager/0.log"
Nov 22 11:39:10 crc kubenswrapper[4938]: I1122 11:39:10.380793 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4n25t" podUID="c7602e76-9220-4979-b453-422742fd88ab" containerName="registry-server" containerID="cri-o://652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f" gracePeriod=2
Nov 22 11:39:10 crc kubenswrapper[4938]: I1122 11:39:10.910653 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.009227 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7602e76-9220-4979-b453-422742fd88ab-catalog-content\") pod \"c7602e76-9220-4979-b453-422742fd88ab\" (UID: \"c7602e76-9220-4979-b453-422742fd88ab\") "
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.009276 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wds52\" (UniqueName: \"kubernetes.io/projected/c7602e76-9220-4979-b453-422742fd88ab-kube-api-access-wds52\") pod \"c7602e76-9220-4979-b453-422742fd88ab\" (UID: \"c7602e76-9220-4979-b453-422742fd88ab\") "
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.009423 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7602e76-9220-4979-b453-422742fd88ab-utilities\") pod \"c7602e76-9220-4979-b453-422742fd88ab\" (UID: \"c7602e76-9220-4979-b453-422742fd88ab\") "
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.010153 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7602e76-9220-4979-b453-422742fd88ab-utilities" (OuterVolumeSpecName: "utilities") pod "c7602e76-9220-4979-b453-422742fd88ab" (UID: "c7602e76-9220-4979-b453-422742fd88ab"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.014618 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7602e76-9220-4979-b453-422742fd88ab-kube-api-access-wds52" (OuterVolumeSpecName: "kube-api-access-wds52") pod "c7602e76-9220-4979-b453-422742fd88ab" (UID: "c7602e76-9220-4979-b453-422742fd88ab"). InnerVolumeSpecName "kube-api-access-wds52". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.052321 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7602e76-9220-4979-b453-422742fd88ab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c7602e76-9220-4979-b453-422742fd88ab" (UID: "c7602e76-9220-4979-b453-422742fd88ab"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.111549 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7602e76-9220-4979-b453-422742fd88ab-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.111602 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wds52\" (UniqueName: \"kubernetes.io/projected/c7602e76-9220-4979-b453-422742fd88ab-kube-api-access-wds52\") on node \"crc\" DevicePath \"\""
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.111619 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7602e76-9220-4979-b453-422742fd88ab-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.388894 4938 generic.go:334] "Generic (PLEG): container finished" podID="c7602e76-9220-4979-b453-422742fd88ab" containerID="652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f" exitCode=0
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.388942 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n25t" event={"ID":"c7602e76-9220-4979-b453-422742fd88ab","Type":"ContainerDied","Data":"652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f"}
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.388969 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4n25t"
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.388989 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4n25t" event={"ID":"c7602e76-9220-4979-b453-422742fd88ab","Type":"ContainerDied","Data":"51a8e4efb04133c1b1f02098e663d4dd07cc9e10df36e6a3a75ad2dc940add74"}
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.389009 4938 scope.go:117] "RemoveContainer" containerID="652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f"
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.429024 4938 scope.go:117] "RemoveContainer" containerID="d6512e543d950d77ab6dfd50c29e69c99ea93e98e4d5f46f3e483a47b6217ab9"
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.440024 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4n25t"]
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.447635 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4n25t"]
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.454757 4938 scope.go:117] "RemoveContainer" containerID="70e81c7b363b26b6f02e675f1155499db2b192afcbcafd9c95ef14e261a64ab2"
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.495900 4938 scope.go:117] "RemoveContainer" containerID="652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f"
Nov 22 11:39:11 crc kubenswrapper[4938]: E1122 11:39:11.496285 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f\": container with ID starting with 652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f not found: ID does not exist" containerID="652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f"
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.496315 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f"} err="failed to get container status \"652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f\": rpc error: code = NotFound desc = could not find container \"652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f\": container with ID starting with 652bb18dcecaf009fca69f5862398f9de52234e5eb2415793e8e03ffcfa5ef6f not found: ID does not exist"
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.496340 4938 scope.go:117] "RemoveContainer" containerID="d6512e543d950d77ab6dfd50c29e69c99ea93e98e4d5f46f3e483a47b6217ab9"
Nov 22 11:39:11 crc kubenswrapper[4938]: E1122 11:39:11.496583 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6512e543d950d77ab6dfd50c29e69c99ea93e98e4d5f46f3e483a47b6217ab9\": container with ID starting with d6512e543d950d77ab6dfd50c29e69c99ea93e98e4d5f46f3e483a47b6217ab9 not found: ID does not exist" containerID="d6512e543d950d77ab6dfd50c29e69c99ea93e98e4d5f46f3e483a47b6217ab9"
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.496605 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6512e543d950d77ab6dfd50c29e69c99ea93e98e4d5f46f3e483a47b6217ab9"} err="failed to get container status \"d6512e543d950d77ab6dfd50c29e69c99ea93e98e4d5f46f3e483a47b6217ab9\": rpc error: code = NotFound desc = could not find container \"d6512e543d950d77ab6dfd50c29e69c99ea93e98e4d5f46f3e483a47b6217ab9\": container with ID starting with d6512e543d950d77ab6dfd50c29e69c99ea93e98e4d5f46f3e483a47b6217ab9 not found: ID does not exist"
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.496619 4938 scope.go:117] "RemoveContainer" containerID="70e81c7b363b26b6f02e675f1155499db2b192afcbcafd9c95ef14e261a64ab2"
Nov 22 11:39:11 crc kubenswrapper[4938]: E1122 11:39:11.496768 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70e81c7b363b26b6f02e675f1155499db2b192afcbcafd9c95ef14e261a64ab2\": container with ID starting with 70e81c7b363b26b6f02e675f1155499db2b192afcbcafd9c95ef14e261a64ab2 not found: ID does not exist" containerID="70e81c7b363b26b6f02e675f1155499db2b192afcbcafd9c95ef14e261a64ab2"
Nov 22 11:39:11 crc kubenswrapper[4938]: I1122 11:39:11.496786 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70e81c7b363b26b6f02e675f1155499db2b192afcbcafd9c95ef14e261a64ab2"} err="failed to get container status \"70e81c7b363b26b6f02e675f1155499db2b192afcbcafd9c95ef14e261a64ab2\": rpc error: code = NotFound desc = could not find container \"70e81c7b363b26b6f02e675f1155499db2b192afcbcafd9c95ef14e261a64ab2\": container with ID starting with 70e81c7b363b26b6f02e675f1155499db2b192afcbcafd9c95ef14e261a64ab2 not found: ID does not exist"
Nov 22 11:39:12 crc kubenswrapper[4938]: I1122 11:39:12.457346 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7602e76-9220-4979-b453-422742fd88ab" path="/var/lib/kubelet/pods/c7602e76-9220-4979-b453-422742fd88ab/volumes"
Nov 22 11:39:18 crc kubenswrapper[4938]: I1122 11:39:18.453470 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:39:18 crc kubenswrapper[4938]: E1122 11:39:18.454219 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:39:23 crc kubenswrapper[4938]: I1122 11:39:23.692323 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-6xczv_fa995a8f-e92f-45fa-8926-73cea902f283/control-plane-machine-set-operator/0.log"
Nov 22 11:39:23 crc kubenswrapper[4938]: I1122 11:39:23.864920 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-rqzh7_44da01b3-b33a-402b-9bc1-ceea816d801b/kube-rbac-proxy/0.log"
Nov 22 11:39:23 crc kubenswrapper[4938]: I1122 11:39:23.897386 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-rqzh7_44da01b3-b33a-402b-9bc1-ceea816d801b/machine-api-operator/0.log"
Nov 22 11:39:33 crc kubenswrapper[4938]: I1122 11:39:33.448087 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:39:33 crc kubenswrapper[4938]: E1122 11:39:33.449897 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:39:35 crc kubenswrapper[4938]: I1122 11:39:35.126975 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-vzl7q_58946c83-00e0-4427-8232-d44f5f8f10e0/cert-manager-controller/0.log"
Nov 22 11:39:35 crc kubenswrapper[4938]: I1122 11:39:35.291450 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-25mlz_3faf7780-9f49-4f23-ac17-454fbeed3e79/cert-manager-cainjector/0.log"
Nov 22 11:39:35 crc kubenswrapper[4938]: I1122 11:39:35.358089 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-cj5bz_17da633d-3102-4583-ba74-5c67cfb859c6/cert-manager-webhook/0.log"
Nov 22 11:39:46 crc kubenswrapper[4938]: I1122 11:39:46.546936 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-msh52_6d740b57-09d5-44f9-90c2-bf2cfeb44311/nmstate-console-plugin/0.log"
Nov 22 11:39:46 crc kubenswrapper[4938]: I1122 11:39:46.656090 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-jhvqn_286573ca-1f3c-4af4-8bfb-ba8b52224082/nmstate-handler/0.log"
Nov 22 11:39:46 crc kubenswrapper[4938]: I1122 11:39:46.717867 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-xfvp7_475ad90f-3054-4344-885d-9fe424557efd/nmstate-metrics/0.log"
Nov 22 11:39:46 crc kubenswrapper[4938]: I1122 11:39:46.720407 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-xfvp7_475ad90f-3054-4344-885d-9fe424557efd/kube-rbac-proxy/0.log"
Nov 22 11:39:46 crc kubenswrapper[4938]: I1122 11:39:46.902178 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-jzqlj_32b53aaa-95c2-4834-a57d-955709a2e992/nmstate-webhook/0.log"
Nov 22 11:39:46 crc kubenswrapper[4938]: I1122 11:39:46.937766 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-g9mdw_f899d59c-f579-4865-985d-d87c4fc54922/nmstate-operator/0.log"
Nov 22 11:39:47 crc kubenswrapper[4938]: I1122 11:39:47.447390 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:39:47 crc kubenswrapper[4938]: E1122 11:39:47.448123 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:39:59 crc kubenswrapper[4938]: I1122 11:39:59.447685 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:39:59 crc kubenswrapper[4938]: E1122 11:39:59.448537 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:39:59 crc kubenswrapper[4938]: I1122 11:39:59.803720 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-b667v_a4e581ed-7db0-4270-9353-ab48412b2994/kube-rbac-proxy/0.log"
Nov 22 11:39:59 crc kubenswrapper[4938]: I1122 11:39:59.943297 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-b667v_a4e581ed-7db0-4270-9353-ab48412b2994/controller/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.047027 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-frr-files/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.215676 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-frr-files/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.257505 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-reloader/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.257551 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-reloader/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.279418 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-metrics/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.431155 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-frr-files/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.446946 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-metrics/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.448587 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-reloader/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.515640 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-metrics/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.684926 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-reloader/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.685968 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-frr-files/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.695068 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-metrics/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.696593 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/controller/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.845660 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/frr-metrics/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.893064 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/kube-rbac-proxy/0.log"
Nov 22 11:40:00 crc kubenswrapper[4938]: I1122 11:40:00.909062 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/kube-rbac-proxy-frr/0.log"
Nov 22 11:40:01 crc kubenswrapper[4938]: I1122 11:40:01.103745 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/reloader/0.log"
Nov 22 11:40:01 crc kubenswrapper[4938]: I1122 11:40:01.166863 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-gvwwc_01cf09eb-20ee-4493-9b69-49beca431020/frr-k8s-webhook-server/0.log"
Nov 22 11:40:01 crc kubenswrapper[4938]: I1122 11:40:01.390078 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-868865b9b5-bxgn8_0ff86a67-1bc4-4f45-82ae-cd10727037d6/manager/0.log"
Nov 22 11:40:01 crc kubenswrapper[4938]: I1122 11:40:01.598202 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-66d55db99c-wjdl4_2490230a-e04c-4569-8870-174b949c7ce6/webhook-server/0.log"
Nov 22 11:40:01 crc kubenswrapper[4938]: I1122 11:40:01.629317 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-5l59v_8fde61c9-db97-436d-8ee1-852084695193/kube-rbac-proxy/0.log"
Nov 22 11:40:02 crc kubenswrapper[4938]: I1122 11:40:02.202343 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/frr/0.log"
Nov 22 11:40:02 crc kubenswrapper[4938]: I1122 11:40:02.239705 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-5l59v_8fde61c9-db97-436d-8ee1-852084695193/speaker/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.074054 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/util/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.259640 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/pull/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.272947 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/util/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.295426 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/pull/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.493489 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/pull/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.495961 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/util/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.512350 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/extract/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.665035 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/extract-utilities/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.800515 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/extract-content/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.813377 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/extract-utilities/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.818378 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/extract-content/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.980521 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/extract-utilities/0.log"
Nov 22 11:40:13 crc kubenswrapper[4938]: I1122 11:40:13.996554 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/extract-content/0.log"
Nov 22 11:40:14 crc kubenswrapper[4938]: I1122 11:40:14.144625 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/extract-utilities/0.log"
Nov 22 11:40:14 crc kubenswrapper[4938]: I1122 11:40:14.429160 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/extract-content/0.log"
Nov 22 11:40:14 crc kubenswrapper[4938]: I1122 11:40:14.431117 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/extract-utilities/0.log"
Nov 22 11:40:14 crc kubenswrapper[4938]: I1122 11:40:14.447818 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:40:14 crc kubenswrapper[4938]: E1122 11:40:14.448085 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:40:14 crc kubenswrapper[4938]: I1122 11:40:14.479185 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/registry-server/0.log"
Nov 22 11:40:14 crc kubenswrapper[4938]: I1122 11:40:14.484970 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/extract-content/0.log"
Nov 22 11:40:14 crc kubenswrapper[4938]: I1122 11:40:14.600275 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/extract-utilities/0.log"
Nov 22 11:40:14 crc kubenswrapper[4938]: I1122 11:40:14.640060 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/extract-content/0.log"
Nov 22 11:40:14 crc kubenswrapper[4938]: I1122 11:40:14.811152 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/util/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.051691 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/pull/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.084657 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/pull/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.088157 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/util/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.219256 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/registry-server/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.287661 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/util/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.289128 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/pull/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.289327 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/extract/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.474313 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/extract-utilities/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.474712 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-hscj2_88051af2-c7b9-45b2-a1a5-2c1a025a271b/marketplace-operator/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.668532 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/extract-content/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.668877 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/extract-utilities/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.671319 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/extract-content/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.848795 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/extract-utilities/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.874802 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/extract-content/0.log"
Nov 22 11:40:15 crc kubenswrapper[4938]: I1122 11:40:15.991206 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/registry-server/0.log"
Nov 22 11:40:16 crc kubenswrapper[4938]: I1122 11:40:16.050183 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/extract-utilities/0.log"
Nov 22 11:40:16 crc kubenswrapper[4938]: I1122 11:40:16.214930 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/extract-content/0.log"
Nov 22 11:40:16 crc kubenswrapper[4938]: I1122 11:40:16.218087 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/extract-utilities/0.log"
Nov 22 11:40:16 crc kubenswrapper[4938]: I1122 11:40:16.249849 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/extract-content/0.log"
Nov 22 11:40:16 crc kubenswrapper[4938]: I1122 11:40:16.412671 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/extract-utilities/0.log"
Nov 22 11:40:16 crc kubenswrapper[4938]: I1122 11:40:16.441491 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/extract-content/0.log"
Nov 22 11:40:16 crc kubenswrapper[4938]: I1122 11:40:16.895940 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/registry-server/0.log"
Nov 22 11:40:28 crc kubenswrapper[4938]: I1122 11:40:28.473384 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:40:28 crc kubenswrapper[4938]: E1122 11:40:28.474262 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:40:39 crc kubenswrapper[4938]: I1122 11:40:39.447471 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:40:39 crc kubenswrapper[4938]: E1122 11:40:39.448088 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:40:54 crc kubenswrapper[4938]: I1122 11:40:54.447858 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:40:54 crc kubenswrapper[4938]: E1122 11:40:54.448669 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:41:09 crc kubenswrapper[4938]: I1122 11:41:09.446970 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:41:09 crc kubenswrapper[4938]: E1122 11:41:09.447659 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:41:23 crc kubenswrapper[4938]: I1122 11:41:23.448100 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:41:23 crc kubenswrapper[4938]: E1122 11:41:23.449031 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:41:38 crc kubenswrapper[4938]: I1122 11:41:38.453315 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:41:38 crc kubenswrapper[4938]: E1122 11:41:38.454390 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:41:51 crc kubenswrapper[4938]: I1122 11:41:51.449511 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:41:51 crc kubenswrapper[4938]: E1122 11:41:51.450375 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:41:57 crc kubenswrapper[4938]: I1122 11:41:57.910371 4938 generic.go:334] "Generic (PLEG): container finished" podID="cf39f269-fac4-4d09-9d9b-39608937d005" containerID="f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252" exitCode=0
Nov 22 11:41:57 crc kubenswrapper[4938]: I1122 11:41:57.910504 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pvr4m/must-gather-tn9hb" event={"ID":"cf39f269-fac4-4d09-9d9b-39608937d005","Type":"ContainerDied","Data":"f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252"}
Nov 22 11:41:57 crc kubenswrapper[4938]: I1122 11:41:57.911522 4938 scope.go:117] "RemoveContainer" containerID="f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252"
Nov 22 11:41:58 crc kubenswrapper[4938]: I1122 11:41:58.890656 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pvr4m_must-gather-tn9hb_cf39f269-fac4-4d09-9d9b-39608937d005/gather/0.log"
Nov 22 11:42:03 crc kubenswrapper[4938]: I1122 11:42:03.448250 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:42:03 crc kubenswrapper[4938]: E1122 11:42:03.449751 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.040362 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pvr4m/must-gather-tn9hb"]
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.041139 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-pvr4m/must-gather-tn9hb" podUID="cf39f269-fac4-4d09-9d9b-39608937d005" containerName="copy" containerID="cri-o://5da696528e847817c2958bcc2650cabdd598f71a530bc1d451f933025868a053" gracePeriod=2
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.048376 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pvr4m/must-gather-tn9hb"]
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.476843 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pvr4m_must-gather-tn9hb_cf39f269-fac4-4d09-9d9b-39608937d005/copy/0.log"
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.477699 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/must-gather-tn9hb"
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.530866 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cf39f269-fac4-4d09-9d9b-39608937d005-must-gather-output\") pod \"cf39f269-fac4-4d09-9d9b-39608937d005\" (UID: \"cf39f269-fac4-4d09-9d9b-39608937d005\") "
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.530956 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gl5t4\" (UniqueName: \"kubernetes.io/projected/cf39f269-fac4-4d09-9d9b-39608937d005-kube-api-access-gl5t4\") pod \"cf39f269-fac4-4d09-9d9b-39608937d005\" (UID: \"cf39f269-fac4-4d09-9d9b-39608937d005\") "
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.539592 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf39f269-fac4-4d09-9d9b-39608937d005-kube-api-access-gl5t4" (OuterVolumeSpecName: "kube-api-access-gl5t4") pod "cf39f269-fac4-4d09-9d9b-39608937d005" (UID: "cf39f269-fac4-4d09-9d9b-39608937d005"). InnerVolumeSpecName "kube-api-access-gl5t4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.634099 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gl5t4\" (UniqueName: \"kubernetes.io/projected/cf39f269-fac4-4d09-9d9b-39608937d005-kube-api-access-gl5t4\") on node \"crc\" DevicePath \"\""
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.664091 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf39f269-fac4-4d09-9d9b-39608937d005-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "cf39f269-fac4-4d09-9d9b-39608937d005" (UID: "cf39f269-fac4-4d09-9d9b-39608937d005"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.736566 4938 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cf39f269-fac4-4d09-9d9b-39608937d005-must-gather-output\") on node \"crc\" DevicePath \"\""
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.998587 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pvr4m_must-gather-tn9hb_cf39f269-fac4-4d09-9d9b-39608937d005/copy/0.log"
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.998943 4938 generic.go:334] "Generic (PLEG): container finished" podID="cf39f269-fac4-4d09-9d9b-39608937d005" containerID="5da696528e847817c2958bcc2650cabdd598f71a530bc1d451f933025868a053" exitCode=143
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.998992 4938 scope.go:117] "RemoveContainer" containerID="5da696528e847817c2958bcc2650cabdd598f71a530bc1d451f933025868a053"
Nov 22 11:42:06 crc kubenswrapper[4938]: I1122 11:42:06.999038 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pvr4m/must-gather-tn9hb"
Nov 22 11:42:07 crc kubenswrapper[4938]: I1122 11:42:07.028573 4938 scope.go:117] "RemoveContainer" containerID="f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252"
Nov 22 11:42:07 crc kubenswrapper[4938]: I1122 11:42:07.087069 4938 scope.go:117] "RemoveContainer" containerID="5da696528e847817c2958bcc2650cabdd598f71a530bc1d451f933025868a053"
Nov 22 11:42:07 crc kubenswrapper[4938]: E1122 11:42:07.089857 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5da696528e847817c2958bcc2650cabdd598f71a530bc1d451f933025868a053\": container with ID starting with 5da696528e847817c2958bcc2650cabdd598f71a530bc1d451f933025868a053 not found: ID does not exist" containerID="5da696528e847817c2958bcc2650cabdd598f71a530bc1d451f933025868a053"
Nov 22 11:42:07 crc kubenswrapper[4938]: I1122 11:42:07.089951 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5da696528e847817c2958bcc2650cabdd598f71a530bc1d451f933025868a053"} err="failed to get container status \"5da696528e847817c2958bcc2650cabdd598f71a530bc1d451f933025868a053\": rpc error: code = NotFound desc = could not find container \"5da696528e847817c2958bcc2650cabdd598f71a530bc1d451f933025868a053\": container with ID starting with 5da696528e847817c2958bcc2650cabdd598f71a530bc1d451f933025868a053 not found: ID does not exist"
Nov 22 11:42:07 crc kubenswrapper[4938]: I1122 11:42:07.089985 4938 scope.go:117] "RemoveContainer" containerID="f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252"
Nov 22 11:42:07 crc kubenswrapper[4938]: E1122 11:42:07.091743 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252\": container with ID starting with f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252 not found: ID does not exist" containerID="f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252"
Nov 22 11:42:07 crc kubenswrapper[4938]: I1122 11:42:07.091782 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252"} err="failed to get container status \"f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252\": rpc error: code = NotFound desc = could not find container \"f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252\": container with ID starting with f4fda19f0a4db405d3d4997d50259186dbe4059597f0fdb8c1a9ef349e386252 not found: ID does not exist"
Nov 22 11:42:08 crc kubenswrapper[4938]: I1122 11:42:08.456633 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf39f269-fac4-4d09-9d9b-39608937d005" path="/var/lib/kubelet/pods/cf39f269-fac4-4d09-9d9b-39608937d005/volumes"
Nov 22 11:42:18 crc kubenswrapper[4938]: I1122 11:42:18.455000 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:42:18 crc kubenswrapper[4938]: E1122 11:42:18.456029 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:42:31 crc kubenswrapper[4938]: I1122 11:42:31.449933 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:42:31 crc kubenswrapper[4938]: E1122 11:42:31.451551 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:42:43 crc kubenswrapper[4938]: I1122 11:42:43.447737 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:42:43 crc kubenswrapper[4938]: E1122 11:42:43.448472 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:42:58 crc kubenswrapper[4938]: I1122 11:42:58.460416 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:42:58 crc kubenswrapper[4938]: E1122 11:42:58.461362 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:43:09 crc kubenswrapper[4938]: I1122 11:43:09.447981 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:43:09 crc kubenswrapper[4938]: E1122 11:43:09.449380 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:43:20 crc kubenswrapper[4938]: I1122 11:43:20.447703 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:43:20 crc kubenswrapper[4938]: I1122 11:43:20.718109 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"f97ea1baec385526d652aa6b038b467ecdad6d69960d9e2d89bdb7d60d6d8e98"}
Nov 22 11:43:26 crc kubenswrapper[4938]: I1122 11:43:26.926277 4938 scope.go:117] "RemoveContainer" containerID="3a1c84d050534943802ec556d3cfc19cc3eb4e4ffa441468c2e579d9f15223b9"
Nov 22 11:44:33 crc kubenswrapper[4938]: I1122
11:44:33.037235 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-jj4sz/must-gather-c6867"] Nov 22 11:44:33 crc kubenswrapper[4938]: E1122 11:44:33.038140 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf39f269-fac4-4d09-9d9b-39608937d005" containerName="gather" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.038153 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf39f269-fac4-4d09-9d9b-39608937d005" containerName="gather" Nov 22 11:44:33 crc kubenswrapper[4938]: E1122 11:44:33.038165 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7602e76-9220-4979-b453-422742fd88ab" containerName="registry-server" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.038172 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7602e76-9220-4979-b453-422742fd88ab" containerName="registry-server" Nov 22 11:44:33 crc kubenswrapper[4938]: E1122 11:44:33.038187 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7602e76-9220-4979-b453-422742fd88ab" containerName="extract-content" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.038193 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7602e76-9220-4979-b453-422742fd88ab" containerName="extract-content" Nov 22 11:44:33 crc kubenswrapper[4938]: E1122 11:44:33.038221 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf39f269-fac4-4d09-9d9b-39608937d005" containerName="copy" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.038227 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf39f269-fac4-4d09-9d9b-39608937d005" containerName="copy" Nov 22 11:44:33 crc kubenswrapper[4938]: E1122 11:44:33.038239 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7602e76-9220-4979-b453-422742fd88ab" containerName="extract-utilities" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.038244 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7602e76-9220-4979-b453-422742fd88ab" containerName="extract-utilities" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.038412 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf39f269-fac4-4d09-9d9b-39608937d005" containerName="gather" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.038430 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf39f269-fac4-4d09-9d9b-39608937d005" containerName="copy" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.038443 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7602e76-9220-4979-b453-422742fd88ab" containerName="registry-server" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.039515 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jj4sz/must-gather-c6867" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.043629 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-jj4sz"/"openshift-service-ca.crt" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.044104 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-jj4sz"/"default-dockercfg-ddqw4" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.044532 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-jj4sz"/"kube-root-ca.crt" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.046053 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-jj4sz/must-gather-c6867"] Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.086015 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s57kk\" (UniqueName: \"kubernetes.io/projected/de2b429f-17b4-4115-9b1e-45d6a5c37446-kube-api-access-s57kk\") pod \"must-gather-c6867\" (UID: \"de2b429f-17b4-4115-9b1e-45d6a5c37446\") " pod="openshift-must-gather-jj4sz/must-gather-c6867" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.087094 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de2b429f-17b4-4115-9b1e-45d6a5c37446-must-gather-output\") pod \"must-gather-c6867\" (UID: \"de2b429f-17b4-4115-9b1e-45d6a5c37446\") " pod="openshift-must-gather-jj4sz/must-gather-c6867" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.187757 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de2b429f-17b4-4115-9b1e-45d6a5c37446-must-gather-output\") pod \"must-gather-c6867\" (UID: \"de2b429f-17b4-4115-9b1e-45d6a5c37446\") " pod="openshift-must-gather-jj4sz/must-gather-c6867" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.187821 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s57kk\" (UniqueName: \"kubernetes.io/projected/de2b429f-17b4-4115-9b1e-45d6a5c37446-kube-api-access-s57kk\") pod \"must-gather-c6867\" (UID: \"de2b429f-17b4-4115-9b1e-45d6a5c37446\") " pod="openshift-must-gather-jj4sz/must-gather-c6867" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.188323 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de2b429f-17b4-4115-9b1e-45d6a5c37446-must-gather-output\") pod \"must-gather-c6867\" (UID: \"de2b429f-17b4-4115-9b1e-45d6a5c37446\") " pod="openshift-must-gather-jj4sz/must-gather-c6867" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.209716 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s57kk\" (UniqueName: \"kubernetes.io/projected/de2b429f-17b4-4115-9b1e-45d6a5c37446-kube-api-access-s57kk\") pod \"must-gather-c6867\" (UID: \"de2b429f-17b4-4115-9b1e-45d6a5c37446\") " pod="openshift-must-gather-jj4sz/must-gather-c6867" Nov 22 11:44:33 crc kubenswrapper[4938]: I1122 11:44:33.360610 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jj4sz/must-gather-c6867" Nov 22 11:44:34 crc kubenswrapper[4938]: I1122 11:44:34.093182 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-jj4sz/must-gather-c6867"] Nov 22 11:44:34 crc kubenswrapper[4938]: I1122 11:44:34.439286 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jj4sz/must-gather-c6867" event={"ID":"de2b429f-17b4-4115-9b1e-45d6a5c37446","Type":"ContainerStarted","Data":"ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450"} Nov 22 11:44:34 crc kubenswrapper[4938]: I1122 11:44:34.439610 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jj4sz/must-gather-c6867" event={"ID":"de2b429f-17b4-4115-9b1e-45d6a5c37446","Type":"ContainerStarted","Data":"17aea8fa82e57b9ef9a2a1db8fb79ba5b20b5d07c9b016ac554d76e8e65b8cd9"} Nov 22 11:44:35 crc kubenswrapper[4938]: I1122 11:44:35.449549 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jj4sz/must-gather-c6867" event={"ID":"de2b429f-17b4-4115-9b1e-45d6a5c37446","Type":"ContainerStarted","Data":"2f1e108b10fc9c703fa65c68877518baaed5a6bb07ffc89d5edbf27817383bae"} Nov 22 11:44:35 crc kubenswrapper[4938]: I1122 11:44:35.473848 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-jj4sz/must-gather-c6867" podStartSLOduration=2.473832308 podStartE2EDuration="2.473832308s" podCreationTimestamp="2025-11-22 11:44:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:44:35.469405418 +0000 UTC m=+4007.937242817" watchObservedRunningTime="2025-11-22 11:44:35.473832308 +0000 UTC m=+4007.941669707" Nov 22 11:44:37 crc kubenswrapper[4938]: E1122 11:44:37.223122 4938 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.182:50552->38.102.83.182:45775: read tcp 38.102.83.182:50552->38.102.83.182:45775: read: connection reset by peer Nov 22 11:44:37 crc kubenswrapper[4938]: I1122 11:44:37.979803 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-jj4sz/crc-debug-c7ld7"] Nov 22 11:44:37 crc kubenswrapper[4938]: I1122 11:44:37.981077 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" Nov 22 11:44:38 crc kubenswrapper[4938]: I1122 11:44:38.074930 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/841ee8d7-cff5-40d0-addb-91b909c18b2f-host\") pod \"crc-debug-c7ld7\" (UID: \"841ee8d7-cff5-40d0-addb-91b909c18b2f\") " pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" Nov 22 11:44:38 crc kubenswrapper[4938]: I1122 11:44:38.075454 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vgzl\" (UniqueName: \"kubernetes.io/projected/841ee8d7-cff5-40d0-addb-91b909c18b2f-kube-api-access-8vgzl\") pod \"crc-debug-c7ld7\" (UID: \"841ee8d7-cff5-40d0-addb-91b909c18b2f\") " pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" Nov 22 11:44:38 crc kubenswrapper[4938]: I1122 11:44:38.176324 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/841ee8d7-cff5-40d0-addb-91b909c18b2f-host\") pod \"crc-debug-c7ld7\" (UID: \"841ee8d7-cff5-40d0-addb-91b909c18b2f\") " pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" Nov 22 11:44:38 crc kubenswrapper[4938]: I1122 11:44:38.176411 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vgzl\" (UniqueName: \"kubernetes.io/projected/841ee8d7-cff5-40d0-addb-91b909c18b2f-kube-api-access-8vgzl\") pod \"crc-debug-c7ld7\" (UID: \"841ee8d7-cff5-40d0-addb-91b909c18b2f\") " pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" Nov 22 11:44:38 crc kubenswrapper[4938]: I1122 11:44:38.176475 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/841ee8d7-cff5-40d0-addb-91b909c18b2f-host\") pod \"crc-debug-c7ld7\" (UID: \"841ee8d7-cff5-40d0-addb-91b909c18b2f\") " pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" Nov 22 11:44:38 crc kubenswrapper[4938]: I1122 11:44:38.195666 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vgzl\" (UniqueName: \"kubernetes.io/projected/841ee8d7-cff5-40d0-addb-91b909c18b2f-kube-api-access-8vgzl\") pod \"crc-debug-c7ld7\" (UID: \"841ee8d7-cff5-40d0-addb-91b909c18b2f\") " pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" Nov 22 11:44:38 crc kubenswrapper[4938]: I1122 11:44:38.301998 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" Nov 22 11:44:38 crc kubenswrapper[4938]: W1122 11:44:38.329549 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod841ee8d7_cff5_40d0_addb_91b909c18b2f.slice/crio-bf6fc44a0a69282de27b42ade1dd28a84fe3af6262699f46410c6aecc649c646 WatchSource:0}: Error finding container bf6fc44a0a69282de27b42ade1dd28a84fe3af6262699f46410c6aecc649c646: Status 404 returned error can't find the container with id bf6fc44a0a69282de27b42ade1dd28a84fe3af6262699f46410c6aecc649c646 Nov 22 11:44:38 crc kubenswrapper[4938]: I1122 11:44:38.472743 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" event={"ID":"841ee8d7-cff5-40d0-addb-91b909c18b2f","Type":"ContainerStarted","Data":"bf6fc44a0a69282de27b42ade1dd28a84fe3af6262699f46410c6aecc649c646"} Nov 22 11:44:39 crc kubenswrapper[4938]: I1122 11:44:39.482050 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" event={"ID":"841ee8d7-cff5-40d0-addb-91b909c18b2f","Type":"ContainerStarted","Data":"43a445ccba1c73991711705391a856aec8cf1d2151378561d15ee8211578c5f4"} Nov 22 11:44:39 crc kubenswrapper[4938]: I1122 11:44:39.507418 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" podStartSLOduration=2.50739373 podStartE2EDuration="2.50739373s" podCreationTimestamp="2025-11-22 11:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:44:39.494981741 +0000 UTC m=+4011.962819140" watchObservedRunningTime="2025-11-22 11:44:39.50739373 +0000 UTC m=+4011.975231129" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.186179 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p"] Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.188274 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.190489 4938 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.190741 4938 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.209553 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p"] Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.278280 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ea396959-21d8-4ae4-9e05-d117ec03163a-config-volume\") pod \"collect-profiles-29396865-5p69p\" (UID: \"ea396959-21d8-4ae4-9e05-d117ec03163a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.278465 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96ds8\" (UniqueName: \"kubernetes.io/projected/ea396959-21d8-4ae4-9e05-d117ec03163a-kube-api-access-96ds8\") pod \"collect-profiles-29396865-5p69p\" (UID: \"ea396959-21d8-4ae4-9e05-d117ec03163a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.278730 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ea396959-21d8-4ae4-9e05-d117ec03163a-secret-volume\") pod \"collect-profiles-29396865-5p69p\" (UID: \"ea396959-21d8-4ae4-9e05-d117ec03163a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.380552 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ea396959-21d8-4ae4-9e05-d117ec03163a-secret-volume\") pod \"collect-profiles-29396865-5p69p\" (UID: \"ea396959-21d8-4ae4-9e05-d117ec03163a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.381793 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ea396959-21d8-4ae4-9e05-d117ec03163a-config-volume\") pod \"collect-profiles-29396865-5p69p\" (UID: \"ea396959-21d8-4ae4-9e05-d117ec03163a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.382113 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96ds8\" (UniqueName: \"kubernetes.io/projected/ea396959-21d8-4ae4-9e05-d117ec03163a-kube-api-access-96ds8\") pod \"collect-profiles-29396865-5p69p\" (UID: \"ea396959-21d8-4ae4-9e05-d117ec03163a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.386657 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ea396959-21d8-4ae4-9e05-d117ec03163a-config-volume\") pod 
\"collect-profiles-29396865-5p69p\" (UID: \"ea396959-21d8-4ae4-9e05-d117ec03163a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.391839 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ea396959-21d8-4ae4-9e05-d117ec03163a-secret-volume\") pod \"collect-profiles-29396865-5p69p\" (UID: \"ea396959-21d8-4ae4-9e05-d117ec03163a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.409498 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96ds8\" (UniqueName: \"kubernetes.io/projected/ea396959-21d8-4ae4-9e05-d117ec03163a-kube-api-access-96ds8\") pod \"collect-profiles-29396865-5p69p\" (UID: \"ea396959-21d8-4ae4-9e05-d117ec03163a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:00 crc kubenswrapper[4938]: I1122 11:45:00.509824 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.108781 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p"] Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.380571 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ltpcf"] Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.383200 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.400043 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ltpcf"] Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.503290 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zps77\" (UniqueName: \"kubernetes.io/projected/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-kube-api-access-zps77\") pod \"community-operators-ltpcf\" (UID: \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\") " pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.503364 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-utilities\") pod \"community-operators-ltpcf\" (UID: \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\") " pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.503416 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-catalog-content\") pod \"community-operators-ltpcf\" (UID: \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\") " pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.605117 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zps77\" (UniqueName: \"kubernetes.io/projected/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-kube-api-access-zps77\") pod \"community-operators-ltpcf\" (UID: \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\") " 
pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.605208 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-utilities\") pod \"community-operators-ltpcf\" (UID: \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\") " pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.605264 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-catalog-content\") pod \"community-operators-ltpcf\" (UID: \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\") " pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.605732 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-utilities\") pod \"community-operators-ltpcf\" (UID: \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\") " pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.605805 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-catalog-content\") pod \"community-operators-ltpcf\" (UID: \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\") " pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.624860 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zps77\" (UniqueName: \"kubernetes.io/projected/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-kube-api-access-zps77\") pod \"community-operators-ltpcf\" (UID: \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\") " pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.682044 4938 generic.go:334] "Generic (PLEG): container finished" podID="ea396959-21d8-4ae4-9e05-d117ec03163a" containerID="4cd014d7141b56caffeec62f6d5d13cfd33ed41611b68d187cdc434a937e9e65" exitCode=0 Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.682104 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" event={"ID":"ea396959-21d8-4ae4-9e05-d117ec03163a","Type":"ContainerDied","Data":"4cd014d7141b56caffeec62f6d5d13cfd33ed41611b68d187cdc434a937e9e65"} Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.682131 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" event={"ID":"ea396959-21d8-4ae4-9e05-d117ec03163a","Type":"ContainerStarted","Data":"86745c112095828f67c10ff80271107551a3786111eaddb237369000d82ac420"} Nov 22 11:45:01 crc kubenswrapper[4938]: I1122 11:45:01.706803 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:02 crc kubenswrapper[4938]: W1122 11:45:02.302877 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0c7851f_d8a9_4d7e_94a0_13825bfb1f5d.slice/crio-928e28f1af19f83fa3c97c46ea8ef2b731b86f46ca84cb47657703157dce357c WatchSource:0}: Error finding container 928e28f1af19f83fa3c97c46ea8ef2b731b86f46ca84cb47657703157dce357c: Status 404 returned error can't find the container with id 928e28f1af19f83fa3c97c46ea8ef2b731b86f46ca84cb47657703157dce357c Nov 22 11:45:02 crc kubenswrapper[4938]: I1122 11:45:02.304004 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ltpcf"] Nov 22 11:45:02 crc kubenswrapper[4938]: I1122 11:45:02.691494 4938 generic.go:334] "Generic (PLEG): container finished" podID="c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" containerID="cfcae21de63a3040b2609cfce09ef415e9a34e4966f980dbd765301d898b587b" exitCode=0 Nov 22 11:45:02 crc kubenswrapper[4938]: I1122 11:45:02.692688 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ltpcf" event={"ID":"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d","Type":"ContainerDied","Data":"cfcae21de63a3040b2609cfce09ef415e9a34e4966f980dbd765301d898b587b"} Nov 22 11:45:02 crc kubenswrapper[4938]: I1122 11:45:02.692711 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ltpcf" event={"ID":"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d","Type":"ContainerStarted","Data":"928e28f1af19f83fa3c97c46ea8ef2b731b86f46ca84cb47657703157dce357c"} Nov 22 11:45:02 crc kubenswrapper[4938]: I1122 11:45:02.693659 4938 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.020777 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.132973 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96ds8\" (UniqueName: \"kubernetes.io/projected/ea396959-21d8-4ae4-9e05-d117ec03163a-kube-api-access-96ds8\") pod \"ea396959-21d8-4ae4-9e05-d117ec03163a\" (UID: \"ea396959-21d8-4ae4-9e05-d117ec03163a\") " Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.133101 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ea396959-21d8-4ae4-9e05-d117ec03163a-config-volume\") pod \"ea396959-21d8-4ae4-9e05-d117ec03163a\" (UID: \"ea396959-21d8-4ae4-9e05-d117ec03163a\") " Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.133176 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ea396959-21d8-4ae4-9e05-d117ec03163a-secret-volume\") pod \"ea396959-21d8-4ae4-9e05-d117ec03163a\" (UID: \"ea396959-21d8-4ae4-9e05-d117ec03163a\") " Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.133718 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea396959-21d8-4ae4-9e05-d117ec03163a-config-volume" (OuterVolumeSpecName: "config-volume") pod "ea396959-21d8-4ae4-9e05-d117ec03163a" (UID: "ea396959-21d8-4ae4-9e05-d117ec03163a"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.138939 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea396959-21d8-4ae4-9e05-d117ec03163a-kube-api-access-96ds8" (OuterVolumeSpecName: "kube-api-access-96ds8") pod "ea396959-21d8-4ae4-9e05-d117ec03163a" (UID: "ea396959-21d8-4ae4-9e05-d117ec03163a"). InnerVolumeSpecName "kube-api-access-96ds8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.148723 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea396959-21d8-4ae4-9e05-d117ec03163a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ea396959-21d8-4ae4-9e05-d117ec03163a" (UID: "ea396959-21d8-4ae4-9e05-d117ec03163a"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.235720 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96ds8\" (UniqueName: \"kubernetes.io/projected/ea396959-21d8-4ae4-9e05-d117ec03163a-kube-api-access-96ds8\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.236129 4938 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ea396959-21d8-4ae4-9e05-d117ec03163a-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.236231 4938 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ea396959-21d8-4ae4-9e05-d117ec03163a-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.704125 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" event={"ID":"ea396959-21d8-4ae4-9e05-d117ec03163a","Type":"ContainerDied","Data":"86745c112095828f67c10ff80271107551a3786111eaddb237369000d82ac420"} Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.704489 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86745c112095828f67c10ff80271107551a3786111eaddb237369000d82ac420" Nov 22 11:45:03 crc kubenswrapper[4938]: I1122 11:45:03.704162 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-5p69p" Nov 22 11:45:04 crc kubenswrapper[4938]: I1122 11:45:04.094386 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt"] Nov 22 11:45:04 crc kubenswrapper[4938]: I1122 11:45:04.101008 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396820-6dlzt"] Nov 22 11:45:04 crc kubenswrapper[4938]: I1122 11:45:04.462084 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="281dba48-4a29-47f7-8228-239a69e7214a" path="/var/lib/kubelet/pods/281dba48-4a29-47f7-8228-239a69e7214a/volumes" Nov 22 11:45:04 crc kubenswrapper[4938]: I1122 11:45:04.715008 4938 generic.go:334] "Generic (PLEG): container finished" podID="c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" containerID="c958c1e369d25309fe53d10bf611338226a465cf7f8e5f44210a95139d39dee9" exitCode=0 Nov 22 11:45:04 crc kubenswrapper[4938]: I1122 11:45:04.715064 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ltpcf" event={"ID":"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d","Type":"ContainerDied","Data":"c958c1e369d25309fe53d10bf611338226a465cf7f8e5f44210a95139d39dee9"} Nov 22 11:45:05 crc kubenswrapper[4938]: I1122 11:45:05.725588 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ltpcf" event={"ID":"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d","Type":"ContainerStarted","Data":"fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659"} Nov 22 11:45:05 crc kubenswrapper[4938]: I1122 11:45:05.746087 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ltpcf" podStartSLOduration=2.165525014 podStartE2EDuration="4.746068719s" podCreationTimestamp="2025-11-22 11:45:01 +0000 UTC" firstStartedPulling="2025-11-22 11:45:02.693422683 +0000 UTC m=+4035.161260082" lastFinishedPulling="2025-11-22 11:45:05.273966388 +0000 UTC m=+4037.741803787" observedRunningTime="2025-11-22 11:45:05.741194807 +0000 UTC m=+4038.209032196" watchObservedRunningTime="2025-11-22 11:45:05.746068719 +0000 UTC m=+4038.213906118" Nov 22 11:45:11 crc kubenswrapper[4938]: I1122 11:45:11.707110 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:11 crc kubenswrapper[4938]: I1122 11:45:11.707825 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:11 crc kubenswrapper[4938]: I1122 11:45:11.763089 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:11 crc kubenswrapper[4938]: I1122 11:45:11.820496 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:12 crc kubenswrapper[4938]: I1122 11:45:12.000829 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ltpcf"] Nov 22 11:45:13 crc kubenswrapper[4938]: I1122 11:45:13.798111 4938 generic.go:334] "Generic (PLEG): container finished" podID="841ee8d7-cff5-40d0-addb-91b909c18b2f" containerID="43a445ccba1c73991711705391a856aec8cf1d2151378561d15ee8211578c5f4" exitCode=0 Nov 22 11:45:13 crc kubenswrapper[4938]: I1122 11:45:13.798216 4938 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" event={"ID":"841ee8d7-cff5-40d0-addb-91b909c18b2f","Type":"ContainerDied","Data":"43a445ccba1c73991711705391a856aec8cf1d2151378561d15ee8211578c5f4"} Nov 22 11:45:13 crc kubenswrapper[4938]: I1122 11:45:13.799035 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ltpcf" podUID="c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" containerName="registry-server" containerID="cri-o://fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659" gracePeriod=2 Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.330851 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.439042 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zps77\" (UniqueName: \"kubernetes.io/projected/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-kube-api-access-zps77\") pod \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\" (UID: \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\") " Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.439572 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-utilities\") pod \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\" (UID: \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\") " Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.439634 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-catalog-content\") pod \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\" (UID: \"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d\") " Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.441296 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-utilities" (OuterVolumeSpecName: "utilities") pod "c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" (UID: "c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.446091 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-kube-api-access-zps77" (OuterVolumeSpecName: "kube-api-access-zps77") pod "c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" (UID: "c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d"). InnerVolumeSpecName "kube-api-access-zps77". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.529644 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" (UID: "c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.545317 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zps77\" (UniqueName: \"kubernetes.io/projected/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-kube-api-access-zps77\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.545353 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.545362 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.811362 4938 generic.go:334] "Generic (PLEG): container finished" podID="c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" containerID="fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659" exitCode=0 Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.811421 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ltpcf" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.811455 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ltpcf" event={"ID":"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d","Type":"ContainerDied","Data":"fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659"} Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.811486 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ltpcf" event={"ID":"c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d","Type":"ContainerDied","Data":"928e28f1af19f83fa3c97c46ea8ef2b731b86f46ca84cb47657703157dce357c"} Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.811503 4938 scope.go:117] "RemoveContainer" containerID="fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.908788 4938 scope.go:117] "RemoveContainer" containerID="c958c1e369d25309fe53d10bf611338226a465cf7f8e5f44210a95139d39dee9" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.909265 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.922440 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ltpcf"] Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.937142 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ltpcf"] Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.951947 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/841ee8d7-cff5-40d0-addb-91b909c18b2f-host\") pod \"841ee8d7-cff5-40d0-addb-91b909c18b2f\" (UID: \"841ee8d7-cff5-40d0-addb-91b909c18b2f\") " Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.952042 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vgzl\" (UniqueName: \"kubernetes.io/projected/841ee8d7-cff5-40d0-addb-91b909c18b2f-kube-api-access-8vgzl\") pod \"841ee8d7-cff5-40d0-addb-91b909c18b2f\" (UID: \"841ee8d7-cff5-40d0-addb-91b909c18b2f\") " Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.952135 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/841ee8d7-cff5-40d0-addb-91b909c18b2f-host" (OuterVolumeSpecName: "host") pod "841ee8d7-cff5-40d0-addb-91b909c18b2f" (UID: "841ee8d7-cff5-40d0-addb-91b909c18b2f"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.952713 4938 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/841ee8d7-cff5-40d0-addb-91b909c18b2f-host\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.961702 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/841ee8d7-cff5-40d0-addb-91b909c18b2f-kube-api-access-8vgzl" (OuterVolumeSpecName: "kube-api-access-8vgzl") pod "841ee8d7-cff5-40d0-addb-91b909c18b2f" (UID: "841ee8d7-cff5-40d0-addb-91b909c18b2f"). InnerVolumeSpecName "kube-api-access-8vgzl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.966322 4938 scope.go:117] "RemoveContainer" containerID="cfcae21de63a3040b2609cfce09ef415e9a34e4966f980dbd765301d898b587b" Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.970782 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-jj4sz/crc-debug-c7ld7"] Nov 22 11:45:14 crc kubenswrapper[4938]: I1122 11:45:14.984370 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-jj4sz/crc-debug-c7ld7"] Nov 22 11:45:15 crc kubenswrapper[4938]: I1122 11:45:15.048099 4938 scope.go:117] "RemoveContainer" containerID="fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659" Nov 22 11:45:15 crc kubenswrapper[4938]: E1122 11:45:15.048558 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659\": container with ID starting with fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659 not found: ID does not exist" containerID="fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659" Nov 22 11:45:15 crc kubenswrapper[4938]: I1122 11:45:15.048605 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659"} err="failed to get container status \"fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659\": rpc error: code = NotFound desc = could not find container \"fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659\": container with ID starting with fb0401ea1e209605493ed3470ebc305b9b88395a8124c92ec19cc5dab39bc659 not found: ID does not exist" Nov 22 11:45:15 crc kubenswrapper[4938]: I1122 11:45:15.048632 4938 scope.go:117] "RemoveContainer" containerID="c958c1e369d25309fe53d10bf611338226a465cf7f8e5f44210a95139d39dee9" Nov 22 11:45:15 crc kubenswrapper[4938]: E1122 11:45:15.048906 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c958c1e369d25309fe53d10bf611338226a465cf7f8e5f44210a95139d39dee9\": container with ID starting with c958c1e369d25309fe53d10bf611338226a465cf7f8e5f44210a95139d39dee9 not found: ID does not exist" containerID="c958c1e369d25309fe53d10bf611338226a465cf7f8e5f44210a95139d39dee9" Nov 22 11:45:15 crc kubenswrapper[4938]: I1122 11:45:15.048944 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c958c1e369d25309fe53d10bf611338226a465cf7f8e5f44210a95139d39dee9"} err="failed to get container status \"c958c1e369d25309fe53d10bf611338226a465cf7f8e5f44210a95139d39dee9\": rpc error: code = NotFound desc = could not find container \"c958c1e369d25309fe53d10bf611338226a465cf7f8e5f44210a95139d39dee9\": container with ID starting with c958c1e369d25309fe53d10bf611338226a465cf7f8e5f44210a95139d39dee9 not found: ID does not exist" Nov 22 11:45:15 crc kubenswrapper[4938]: I1122 11:45:15.048958 4938 scope.go:117] "RemoveContainer" containerID="cfcae21de63a3040b2609cfce09ef415e9a34e4966f980dbd765301d898b587b" Nov 22 11:45:15 crc kubenswrapper[4938]: E1122 11:45:15.049221 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfcae21de63a3040b2609cfce09ef415e9a34e4966f980dbd765301d898b587b\": container with ID starting with 
cfcae21de63a3040b2609cfce09ef415e9a34e4966f980dbd765301d898b587b not found: ID does not exist" containerID="cfcae21de63a3040b2609cfce09ef415e9a34e4966f980dbd765301d898b587b" Nov 22 11:45:15 crc kubenswrapper[4938]: I1122 11:45:15.049246 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfcae21de63a3040b2609cfce09ef415e9a34e4966f980dbd765301d898b587b"} err="failed to get container status \"cfcae21de63a3040b2609cfce09ef415e9a34e4966f980dbd765301d898b587b\": rpc error: code = NotFound desc = could not find container \"cfcae21de63a3040b2609cfce09ef415e9a34e4966f980dbd765301d898b587b\": container with ID starting with cfcae21de63a3040b2609cfce09ef415e9a34e4966f980dbd765301d898b587b not found: ID does not exist" Nov 22 11:45:15 crc kubenswrapper[4938]: I1122 11:45:15.056212 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vgzl\" (UniqueName: \"kubernetes.io/projected/841ee8d7-cff5-40d0-addb-91b909c18b2f-kube-api-access-8vgzl\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:15 crc kubenswrapper[4938]: I1122 11:45:15.822586 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf6fc44a0a69282de27b42ade1dd28a84fe3af6262699f46410c6aecc649c646" Nov 22 11:45:15 crc kubenswrapper[4938]: I1122 11:45:15.822672 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jj4sz/crc-debug-c7ld7" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.163137 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-jj4sz/crc-debug-p447z"] Nov 22 11:45:16 crc kubenswrapper[4938]: E1122 11:45:16.163810 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" containerName="extract-content" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.163824 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" containerName="extract-content" Nov 22 11:45:16 crc kubenswrapper[4938]: E1122 11:45:16.163840 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea396959-21d8-4ae4-9e05-d117ec03163a" containerName="collect-profiles" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.163846 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea396959-21d8-4ae4-9e05-d117ec03163a" containerName="collect-profiles" Nov 22 11:45:16 crc kubenswrapper[4938]: E1122 11:45:16.163863 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" containerName="registry-server" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.163869 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" containerName="registry-server" Nov 22 11:45:16 crc kubenswrapper[4938]: E1122 11:45:16.163885 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="841ee8d7-cff5-40d0-addb-91b909c18b2f" containerName="container-00" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.163891 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="841ee8d7-cff5-40d0-addb-91b909c18b2f" containerName="container-00" Nov 22 11:45:16 crc kubenswrapper[4938]: E1122 11:45:16.163928 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" containerName="extract-utilities" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.163936 4938 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" containerName="extract-utilities" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.164126 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="841ee8d7-cff5-40d0-addb-91b909c18b2f" containerName="container-00" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.164143 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" containerName="registry-server" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.164159 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea396959-21d8-4ae4-9e05-d117ec03163a" containerName="collect-profiles" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.164752 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jj4sz/crc-debug-p447z" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.277613 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gd488\" (UniqueName: \"kubernetes.io/projected/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c-kube-api-access-gd488\") pod \"crc-debug-p447z\" (UID: \"085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c\") " pod="openshift-must-gather-jj4sz/crc-debug-p447z" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.277697 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c-host\") pod \"crc-debug-p447z\" (UID: \"085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c\") " pod="openshift-must-gather-jj4sz/crc-debug-p447z" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.379072 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gd488\" (UniqueName: \"kubernetes.io/projected/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c-kube-api-access-gd488\") pod \"crc-debug-p447z\" (UID: \"085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c\") " pod="openshift-must-gather-jj4sz/crc-debug-p447z" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.379132 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c-host\") pod \"crc-debug-p447z\" (UID: \"085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c\") " pod="openshift-must-gather-jj4sz/crc-debug-p447z" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.379275 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c-host\") pod \"crc-debug-p447z\" (UID: \"085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c\") " pod="openshift-must-gather-jj4sz/crc-debug-p447z" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.462890 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="841ee8d7-cff5-40d0-addb-91b909c18b2f" path="/var/lib/kubelet/pods/841ee8d7-cff5-40d0-addb-91b909c18b2f/volumes" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.463751 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d" path="/var/lib/kubelet/pods/c0c7851f-d8a9-4d7e-94a0-13825bfb1f5d/volumes" Nov 22 11:45:16 crc kubenswrapper[4938]: I1122 11:45:16.819785 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gd488\" (UniqueName: \"kubernetes.io/projected/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c-kube-api-access-gd488\") pod 
\"crc-debug-p447z\" (UID: \"085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c\") " pod="openshift-must-gather-jj4sz/crc-debug-p447z" Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.084248 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jj4sz/crc-debug-p447z" Nov 22 11:45:17 crc kubenswrapper[4938]: W1122 11:45:17.122018 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod085b1f2b_d7ea_4d28_8dfe_e08245b0dd5c.slice/crio-442b9203f24af6c3edfb34bcd072e88de02dfbb1fa6dc8f08427c743e819234e WatchSource:0}: Error finding container 442b9203f24af6c3edfb34bcd072e88de02dfbb1fa6dc8f08427c743e819234e: Status 404 returned error can't find the container with id 442b9203f24af6c3edfb34bcd072e88de02dfbb1fa6dc8f08427c743e819234e Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.414555 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gzfvn"] Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.420347 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.442570 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gzfvn"] Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.499145 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnmb7\" (UniqueName: \"kubernetes.io/projected/72076617-9e8e-4ddf-8d6a-73086a29ed31-kube-api-access-rnmb7\") pod \"redhat-marketplace-gzfvn\" (UID: \"72076617-9e8e-4ddf-8d6a-73086a29ed31\") " pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.499358 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72076617-9e8e-4ddf-8d6a-73086a29ed31-catalog-content\") pod \"redhat-marketplace-gzfvn\" (UID: \"72076617-9e8e-4ddf-8d6a-73086a29ed31\") " pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.499571 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72076617-9e8e-4ddf-8d6a-73086a29ed31-utilities\") pod \"redhat-marketplace-gzfvn\" (UID: \"72076617-9e8e-4ddf-8d6a-73086a29ed31\") " pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.601755 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72076617-9e8e-4ddf-8d6a-73086a29ed31-catalog-content\") pod \"redhat-marketplace-gzfvn\" (UID: \"72076617-9e8e-4ddf-8d6a-73086a29ed31\") " pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.601901 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72076617-9e8e-4ddf-8d6a-73086a29ed31-utilities\") pod \"redhat-marketplace-gzfvn\" (UID: \"72076617-9e8e-4ddf-8d6a-73086a29ed31\") " pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.602031 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-rnmb7\" (UniqueName: \"kubernetes.io/projected/72076617-9e8e-4ddf-8d6a-73086a29ed31-kube-api-access-rnmb7\") pod \"redhat-marketplace-gzfvn\" (UID: \"72076617-9e8e-4ddf-8d6a-73086a29ed31\") " pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.602832 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72076617-9e8e-4ddf-8d6a-73086a29ed31-catalog-content\") pod \"redhat-marketplace-gzfvn\" (UID: \"72076617-9e8e-4ddf-8d6a-73086a29ed31\") " pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.603858 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72076617-9e8e-4ddf-8d6a-73086a29ed31-utilities\") pod \"redhat-marketplace-gzfvn\" (UID: \"72076617-9e8e-4ddf-8d6a-73086a29ed31\") " pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.620463 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnmb7\" (UniqueName: \"kubernetes.io/projected/72076617-9e8e-4ddf-8d6a-73086a29ed31-kube-api-access-rnmb7\") pod \"redhat-marketplace-gzfvn\" (UID: \"72076617-9e8e-4ddf-8d6a-73086a29ed31\") " pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.760736 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.852827 4938 generic.go:334] "Generic (PLEG): container finished" podID="085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c" containerID="56f7666e81fbdc8d2900c371614d988e2c331b3cbd16450ed8ffd1d061525194" exitCode=0 Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.853090 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jj4sz/crc-debug-p447z" event={"ID":"085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c","Type":"ContainerDied","Data":"56f7666e81fbdc8d2900c371614d988e2c331b3cbd16450ed8ffd1d061525194"} Nov 22 11:45:17 crc kubenswrapper[4938]: I1122 11:45:17.853116 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jj4sz/crc-debug-p447z" event={"ID":"085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c","Type":"ContainerStarted","Data":"442b9203f24af6c3edfb34bcd072e88de02dfbb1fa6dc8f08427c743e819234e"} Nov 22 11:45:18 crc kubenswrapper[4938]: I1122 11:45:18.205394 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gzfvn"] Nov 22 11:45:18 crc kubenswrapper[4938]: I1122 11:45:18.281323 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-jj4sz/crc-debug-p447z"] Nov 22 11:45:18 crc kubenswrapper[4938]: I1122 11:45:18.288749 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-jj4sz/crc-debug-p447z"] Nov 22 11:45:18 crc kubenswrapper[4938]: I1122 11:45:18.861778 4938 generic.go:334] "Generic (PLEG): container finished" podID="72076617-9e8e-4ddf-8d6a-73086a29ed31" containerID="ecd1b08fadb1472608386041108dec69df50c7b9de178afbeb029f1464447ea2" exitCode=0 Nov 22 11:45:18 crc kubenswrapper[4938]: I1122 11:45:18.863399 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gzfvn" 
event={"ID":"72076617-9e8e-4ddf-8d6a-73086a29ed31","Type":"ContainerDied","Data":"ecd1b08fadb1472608386041108dec69df50c7b9de178afbeb029f1464447ea2"} Nov 22 11:45:18 crc kubenswrapper[4938]: I1122 11:45:18.863426 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gzfvn" event={"ID":"72076617-9e8e-4ddf-8d6a-73086a29ed31","Type":"ContainerStarted","Data":"eadf70ede0e177877178d4794da7a27c39c0133865f09f6e399204dfde6b17f2"} Nov 22 11:45:18 crc kubenswrapper[4938]: I1122 11:45:18.960975 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jj4sz/crc-debug-p447z" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.045776 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gd488\" (UniqueName: \"kubernetes.io/projected/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c-kube-api-access-gd488\") pod \"085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c\" (UID: \"085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c\") " Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.045872 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c-host\") pod \"085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c\" (UID: \"085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c\") " Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.045971 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c-host" (OuterVolumeSpecName: "host") pod "085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c" (UID: "085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.046386 4938 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c-host\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.119761 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c-kube-api-access-gd488" (OuterVolumeSpecName: "kube-api-access-gd488") pod "085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c" (UID: "085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c"). InnerVolumeSpecName "kube-api-access-gd488". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.148205 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gd488\" (UniqueName: \"kubernetes.io/projected/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c-kube-api-access-gd488\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.769273 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-jj4sz/crc-debug-kfn9s"] Nov 22 11:45:19 crc kubenswrapper[4938]: E1122 11:45:19.770014 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c" containerName="container-00" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.770029 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c" containerName="container-00" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.770245 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c" containerName="container-00" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.770856 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jj4sz/crc-debug-kfn9s" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.861349 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/52871c97-c1d3-4aac-af04-66333789a243-host\") pod \"crc-debug-kfn9s\" (UID: \"52871c97-c1d3-4aac-af04-66333789a243\") " pod="openshift-must-gather-jj4sz/crc-debug-kfn9s" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.861511 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrqrw\" (UniqueName: \"kubernetes.io/projected/52871c97-c1d3-4aac-af04-66333789a243-kube-api-access-nrqrw\") pod \"crc-debug-kfn9s\" (UID: \"52871c97-c1d3-4aac-af04-66333789a243\") " pod="openshift-must-gather-jj4sz/crc-debug-kfn9s" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.879022 4938 generic.go:334] "Generic (PLEG): container finished" podID="72076617-9e8e-4ddf-8d6a-73086a29ed31" containerID="142d61ce33fff2816365c50fc0523f8abb0615893027b4a6d73ffa5a276e4598" exitCode=0 Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.879088 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gzfvn" event={"ID":"72076617-9e8e-4ddf-8d6a-73086a29ed31","Type":"ContainerDied","Data":"142d61ce33fff2816365c50fc0523f8abb0615893027b4a6d73ffa5a276e4598"} Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.883617 4938 scope.go:117] "RemoveContainer" containerID="56f7666e81fbdc8d2900c371614d988e2c331b3cbd16450ed8ffd1d061525194" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.883716 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jj4sz/crc-debug-p447z" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.963431 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/52871c97-c1d3-4aac-af04-66333789a243-host\") pod \"crc-debug-kfn9s\" (UID: \"52871c97-c1d3-4aac-af04-66333789a243\") " pod="openshift-must-gather-jj4sz/crc-debug-kfn9s" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.963572 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrqrw\" (UniqueName: \"kubernetes.io/projected/52871c97-c1d3-4aac-af04-66333789a243-kube-api-access-nrqrw\") pod \"crc-debug-kfn9s\" (UID: \"52871c97-c1d3-4aac-af04-66333789a243\") " pod="openshift-must-gather-jj4sz/crc-debug-kfn9s" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.963742 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/52871c97-c1d3-4aac-af04-66333789a243-host\") pod \"crc-debug-kfn9s\" (UID: \"52871c97-c1d3-4aac-af04-66333789a243\") " pod="openshift-must-gather-jj4sz/crc-debug-kfn9s" Nov 22 11:45:19 crc kubenswrapper[4938]: I1122 11:45:19.982678 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrqrw\" (UniqueName: \"kubernetes.io/projected/52871c97-c1d3-4aac-af04-66333789a243-kube-api-access-nrqrw\") pod \"crc-debug-kfn9s\" (UID: \"52871c97-c1d3-4aac-af04-66333789a243\") " pod="openshift-must-gather-jj4sz/crc-debug-kfn9s" Nov 22 11:45:20 crc kubenswrapper[4938]: I1122 11:45:20.112751 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jj4sz/crc-debug-kfn9s" Nov 22 11:45:20 crc kubenswrapper[4938]: W1122 11:45:20.144111 4938 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52871c97_c1d3_4aac_af04_66333789a243.slice/crio-59e5f088446954650cba6698017ef2fcd481968a2257bb29456fa331aafdd1a1 WatchSource:0}: Error finding container 59e5f088446954650cba6698017ef2fcd481968a2257bb29456fa331aafdd1a1: Status 404 returned error can't find the container with id 59e5f088446954650cba6698017ef2fcd481968a2257bb29456fa331aafdd1a1 Nov 22 11:45:20 crc kubenswrapper[4938]: I1122 11:45:20.457901 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c" path="/var/lib/kubelet/pods/085b1f2b-d7ea-4d28-8dfe-e08245b0dd5c/volumes" Nov 22 11:45:20 crc kubenswrapper[4938]: I1122 11:45:20.896974 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gzfvn" event={"ID":"72076617-9e8e-4ddf-8d6a-73086a29ed31","Type":"ContainerStarted","Data":"12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff"} Nov 22 11:45:20 crc kubenswrapper[4938]: I1122 11:45:20.903393 4938 generic.go:334] "Generic (PLEG): container finished" podID="52871c97-c1d3-4aac-af04-66333789a243" containerID="0d2a09318799d80e5b6e6367c93df5711795dcb3f0d719bc179d27a9268b1c0e" exitCode=0 Nov 22 11:45:20 crc kubenswrapper[4938]: I1122 11:45:20.903433 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jj4sz/crc-debug-kfn9s" event={"ID":"52871c97-c1d3-4aac-af04-66333789a243","Type":"ContainerDied","Data":"0d2a09318799d80e5b6e6367c93df5711795dcb3f0d719bc179d27a9268b1c0e"} Nov 22 11:45:20 crc kubenswrapper[4938]: I1122 11:45:20.903511 4938 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-must-gather-jj4sz/crc-debug-kfn9s" event={"ID":"52871c97-c1d3-4aac-af04-66333789a243","Type":"ContainerStarted","Data":"59e5f088446954650cba6698017ef2fcd481968a2257bb29456fa331aafdd1a1"} Nov 22 11:45:20 crc kubenswrapper[4938]: I1122 11:45:20.932612 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gzfvn" podStartSLOduration=2.489946283 podStartE2EDuration="3.932588693s" podCreationTimestamp="2025-11-22 11:45:17 +0000 UTC" firstStartedPulling="2025-11-22 11:45:18.864003153 +0000 UTC m=+4051.331840552" lastFinishedPulling="2025-11-22 11:45:20.306645563 +0000 UTC m=+4052.774482962" observedRunningTime="2025-11-22 11:45:20.927207059 +0000 UTC m=+4053.395044468" watchObservedRunningTime="2025-11-22 11:45:20.932588693 +0000 UTC m=+4053.400426102" Nov 22 11:45:20 crc kubenswrapper[4938]: I1122 11:45:20.972559 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-jj4sz/crc-debug-kfn9s"] Nov 22 11:45:20 crc kubenswrapper[4938]: I1122 11:45:20.986671 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-jj4sz/crc-debug-kfn9s"] Nov 22 11:45:22 crc kubenswrapper[4938]: I1122 11:45:22.026648 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jj4sz/crc-debug-kfn9s" Nov 22 11:45:22 crc kubenswrapper[4938]: I1122 11:45:22.105828 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrqrw\" (UniqueName: \"kubernetes.io/projected/52871c97-c1d3-4aac-af04-66333789a243-kube-api-access-nrqrw\") pod \"52871c97-c1d3-4aac-af04-66333789a243\" (UID: \"52871c97-c1d3-4aac-af04-66333789a243\") " Nov 22 11:45:22 crc kubenswrapper[4938]: I1122 11:45:22.105928 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/52871c97-c1d3-4aac-af04-66333789a243-host\") pod \"52871c97-c1d3-4aac-af04-66333789a243\" (UID: \"52871c97-c1d3-4aac-af04-66333789a243\") " Nov 22 11:45:22 crc kubenswrapper[4938]: I1122 11:45:22.106011 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/52871c97-c1d3-4aac-af04-66333789a243-host" (OuterVolumeSpecName: "host") pod "52871c97-c1d3-4aac-af04-66333789a243" (UID: "52871c97-c1d3-4aac-af04-66333789a243"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 11:45:22 crc kubenswrapper[4938]: I1122 11:45:22.108026 4938 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/52871c97-c1d3-4aac-af04-66333789a243-host\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:22 crc kubenswrapper[4938]: I1122 11:45:22.119153 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52871c97-c1d3-4aac-af04-66333789a243-kube-api-access-nrqrw" (OuterVolumeSpecName: "kube-api-access-nrqrw") pod "52871c97-c1d3-4aac-af04-66333789a243" (UID: "52871c97-c1d3-4aac-af04-66333789a243"). InnerVolumeSpecName "kube-api-access-nrqrw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:45:22 crc kubenswrapper[4938]: I1122 11:45:22.209694 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrqrw\" (UniqueName: \"kubernetes.io/projected/52871c97-c1d3-4aac-af04-66333789a243-kube-api-access-nrqrw\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:22 crc kubenswrapper[4938]: I1122 11:45:22.459754 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52871c97-c1d3-4aac-af04-66333789a243" path="/var/lib/kubelet/pods/52871c97-c1d3-4aac-af04-66333789a243/volumes" Nov 22 11:45:22 crc kubenswrapper[4938]: I1122 11:45:22.920394 4938 scope.go:117] "RemoveContainer" containerID="0d2a09318799d80e5b6e6367c93df5711795dcb3f0d719bc179d27a9268b1c0e" Nov 22 11:45:22 crc kubenswrapper[4938]: I1122 11:45:22.920450 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jj4sz/crc-debug-kfn9s" Nov 22 11:45:26 crc kubenswrapper[4938]: I1122 11:45:26.996541 4938 scope.go:117] "RemoveContainer" containerID="96ba1039e4f3127780369d879e3ff5c718051ff9c18ee651a6fd74d8b8929616" Nov 22 11:45:27 crc kubenswrapper[4938]: I1122 11:45:27.762237 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:27 crc kubenswrapper[4938]: I1122 11:45:27.762535 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:27 crc kubenswrapper[4938]: I1122 11:45:27.815270 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:28 crc kubenswrapper[4938]: I1122 11:45:28.006802 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:28 crc kubenswrapper[4938]: I1122 11:45:28.058797 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gzfvn"] Nov 22 11:45:29 crc kubenswrapper[4938]: I1122 11:45:29.975737 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gzfvn" podUID="72076617-9e8e-4ddf-8d6a-73086a29ed31" containerName="registry-server" containerID="cri-o://12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff" gracePeriod=2 Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.465648 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.579127 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnmb7\" (UniqueName: \"kubernetes.io/projected/72076617-9e8e-4ddf-8d6a-73086a29ed31-kube-api-access-rnmb7\") pod \"72076617-9e8e-4ddf-8d6a-73086a29ed31\" (UID: \"72076617-9e8e-4ddf-8d6a-73086a29ed31\") " Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.579587 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72076617-9e8e-4ddf-8d6a-73086a29ed31-catalog-content\") pod \"72076617-9e8e-4ddf-8d6a-73086a29ed31\" (UID: \"72076617-9e8e-4ddf-8d6a-73086a29ed31\") " Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.579747 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72076617-9e8e-4ddf-8d6a-73086a29ed31-utilities\") pod \"72076617-9e8e-4ddf-8d6a-73086a29ed31\" (UID: \"72076617-9e8e-4ddf-8d6a-73086a29ed31\") " Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.580674 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72076617-9e8e-4ddf-8d6a-73086a29ed31-utilities" (OuterVolumeSpecName: "utilities") pod "72076617-9e8e-4ddf-8d6a-73086a29ed31" (UID: "72076617-9e8e-4ddf-8d6a-73086a29ed31"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.584620 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72076617-9e8e-4ddf-8d6a-73086a29ed31-kube-api-access-rnmb7" (OuterVolumeSpecName: "kube-api-access-rnmb7") pod "72076617-9e8e-4ddf-8d6a-73086a29ed31" (UID: "72076617-9e8e-4ddf-8d6a-73086a29ed31"). InnerVolumeSpecName "kube-api-access-rnmb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.606515 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72076617-9e8e-4ddf-8d6a-73086a29ed31-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "72076617-9e8e-4ddf-8d6a-73086a29ed31" (UID: "72076617-9e8e-4ddf-8d6a-73086a29ed31"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.682309 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72076617-9e8e-4ddf-8d6a-73086a29ed31-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.682339 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnmb7\" (UniqueName: \"kubernetes.io/projected/72076617-9e8e-4ddf-8d6a-73086a29ed31-kube-api-access-rnmb7\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.682352 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72076617-9e8e-4ddf-8d6a-73086a29ed31-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.989082 4938 generic.go:334] "Generic (PLEG): container finished" podID="72076617-9e8e-4ddf-8d6a-73086a29ed31" containerID="12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff" exitCode=0 Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.989162 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gzfvn" event={"ID":"72076617-9e8e-4ddf-8d6a-73086a29ed31","Type":"ContainerDied","Data":"12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff"} Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.989196 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gzfvn" event={"ID":"72076617-9e8e-4ddf-8d6a-73086a29ed31","Type":"ContainerDied","Data":"eadf70ede0e177877178d4794da7a27c39c0133865f09f6e399204dfde6b17f2"} Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.989225 4938 scope.go:117] "RemoveContainer" containerID="12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff" Nov 22 11:45:30 crc kubenswrapper[4938]: I1122 11:45:30.989386 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gzfvn" Nov 22 11:45:31 crc kubenswrapper[4938]: I1122 11:45:31.019943 4938 scope.go:117] "RemoveContainer" containerID="142d61ce33fff2816365c50fc0523f8abb0615893027b4a6d73ffa5a276e4598" Nov 22 11:45:31 crc kubenswrapper[4938]: I1122 11:45:31.046327 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gzfvn"] Nov 22 11:45:31 crc kubenswrapper[4938]: I1122 11:45:31.060826 4938 scope.go:117] "RemoveContainer" containerID="ecd1b08fadb1472608386041108dec69df50c7b9de178afbeb029f1464447ea2" Nov 22 11:45:31 crc kubenswrapper[4938]: I1122 11:45:31.062054 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gzfvn"] Nov 22 11:45:31 crc kubenswrapper[4938]: I1122 11:45:31.096579 4938 scope.go:117] "RemoveContainer" containerID="12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff" Nov 22 11:45:31 crc kubenswrapper[4938]: E1122 11:45:31.097128 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff\": container with ID starting with 12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff not found: ID does not exist" containerID="12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff" Nov 22 11:45:31 crc kubenswrapper[4938]: I1122 11:45:31.097179 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff"} err="failed to get container status \"12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff\": rpc error: code = NotFound desc = could not find container \"12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff\": container with ID starting with 12edc71e14a8631b778854ada933d311d8ae90675912b5dfeafb7843ddb0d1ff not found: ID does not exist" Nov 22 11:45:31 crc kubenswrapper[4938]: I1122 11:45:31.097216 4938 scope.go:117] "RemoveContainer" containerID="142d61ce33fff2816365c50fc0523f8abb0615893027b4a6d73ffa5a276e4598" Nov 22 11:45:31 crc kubenswrapper[4938]: E1122 11:45:31.097594 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"142d61ce33fff2816365c50fc0523f8abb0615893027b4a6d73ffa5a276e4598\": container with ID starting with 142d61ce33fff2816365c50fc0523f8abb0615893027b4a6d73ffa5a276e4598 not found: ID does not exist" containerID="142d61ce33fff2816365c50fc0523f8abb0615893027b4a6d73ffa5a276e4598" Nov 22 11:45:31 crc kubenswrapper[4938]: I1122 11:45:31.097626 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"142d61ce33fff2816365c50fc0523f8abb0615893027b4a6d73ffa5a276e4598"} err="failed to get container status \"142d61ce33fff2816365c50fc0523f8abb0615893027b4a6d73ffa5a276e4598\": rpc error: code = NotFound desc = could not find container \"142d61ce33fff2816365c50fc0523f8abb0615893027b4a6d73ffa5a276e4598\": container with ID starting with 142d61ce33fff2816365c50fc0523f8abb0615893027b4a6d73ffa5a276e4598 not found: ID does not exist" Nov 22 11:45:31 crc kubenswrapper[4938]: I1122 11:45:31.097645 4938 scope.go:117] "RemoveContainer" containerID="ecd1b08fadb1472608386041108dec69df50c7b9de178afbeb029f1464447ea2" Nov 22 11:45:31 crc kubenswrapper[4938]: E1122 11:45:31.097904 4938 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ecd1b08fadb1472608386041108dec69df50c7b9de178afbeb029f1464447ea2\": container with ID starting with ecd1b08fadb1472608386041108dec69df50c7b9de178afbeb029f1464447ea2 not found: ID does not exist" containerID="ecd1b08fadb1472608386041108dec69df50c7b9de178afbeb029f1464447ea2" Nov 22 11:45:31 crc kubenswrapper[4938]: I1122 11:45:31.097949 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecd1b08fadb1472608386041108dec69df50c7b9de178afbeb029f1464447ea2"} err="failed to get container status \"ecd1b08fadb1472608386041108dec69df50c7b9de178afbeb029f1464447ea2\": rpc error: code = NotFound desc = could not find container \"ecd1b08fadb1472608386041108dec69df50c7b9de178afbeb029f1464447ea2\": container with ID starting with ecd1b08fadb1472608386041108dec69df50c7b9de178afbeb029f1464447ea2 not found: ID does not exist" Nov 22 11:45:32 crc kubenswrapper[4938]: I1122 11:45:32.459397 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72076617-9e8e-4ddf-8d6a-73086a29ed31" path="/var/lib/kubelet/pods/72076617-9e8e-4ddf-8d6a-73086a29ed31/volumes" Nov 22 11:45:41 crc kubenswrapper[4938]: I1122 11:45:41.301262 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:45:41 crc kubenswrapper[4938]: I1122 11:45:41.301970 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:45:44 crc kubenswrapper[4938]: I1122 11:45:44.498364 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5cbdfff8c8-z9wrl_86ceb17d-9778-45f9-a75e-ed96d5abe722/barbican-api/0.log" Nov 22 11:45:44 crc kubenswrapper[4938]: I1122 11:45:44.622477 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5cbdfff8c8-z9wrl_86ceb17d-9778-45f9-a75e-ed96d5abe722/barbican-api-log/0.log" Nov 22 11:45:45 crc kubenswrapper[4938]: I1122 11:45:45.184081 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6546d544c4-wcfbr_0d27fa20-5741-49e1-a69c-5f3c856bea32/barbican-keystone-listener/0.log" Nov 22 11:45:45 crc kubenswrapper[4938]: I1122 11:45:45.232772 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6865f54775-4zkqs_796676ca-fdb7-4ac2-9092-73d2ac3ac760/barbican-worker/0.log" Nov 22 11:45:45 crc kubenswrapper[4938]: I1122 11:45:45.250442 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6546d544c4-wcfbr_0d27fa20-5741-49e1-a69c-5f3c856bea32/barbican-keystone-listener-log/0.log" Nov 22 11:45:45 crc kubenswrapper[4938]: I1122 11:45:45.370430 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6865f54775-4zkqs_796676ca-fdb7-4ac2-9092-73d2ac3ac760/barbican-worker-log/0.log" Nov 22 11:45:45 crc kubenswrapper[4938]: I1122 11:45:45.477161 4938 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-bqjb8_a53e09b5-739a-427e-b8f4-48fd612e9b07/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:45 crc kubenswrapper[4938]: I1122 11:45:45.630129 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_30d989f4-c834-49a2-aeaf-6478a2318852/proxy-httpd/0.log" Nov 22 11:45:45 crc kubenswrapper[4938]: I1122 11:45:45.661902 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_30d989f4-c834-49a2-aeaf-6478a2318852/ceilometer-central-agent/0.log" Nov 22 11:45:45 crc kubenswrapper[4938]: I1122 11:45:45.699323 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_30d989f4-c834-49a2-aeaf-6478a2318852/ceilometer-notification-agent/0.log" Nov 22 11:45:45 crc kubenswrapper[4938]: I1122 11:45:45.708038 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_30d989f4-c834-49a2-aeaf-6478a2318852/sg-core/0.log" Nov 22 11:45:45 crc kubenswrapper[4938]: I1122 11:45:45.883647 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_2048ffcd-1faf-44c6-a1e6-425501f44282/cinder-api-log/0.log" Nov 22 11:45:45 crc kubenswrapper[4938]: I1122 11:45:45.923166 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_2048ffcd-1faf-44c6-a1e6-425501f44282/cinder-api/0.log" Nov 22 11:45:46 crc kubenswrapper[4938]: I1122 11:45:46.114305 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_4c5c1e05-59a2-49d5-9bbc-315dc537b994/cinder-scheduler/0.log" Nov 22 11:45:46 crc kubenswrapper[4938]: I1122 11:45:46.116059 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_4c5c1e05-59a2-49d5-9bbc-315dc537b994/probe/0.log" Nov 22 11:45:46 crc kubenswrapper[4938]: I1122 11:45:46.203315 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-6wp6w_66ca7351-72c9-401f-8602-f7a34033d228/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:46 crc kubenswrapper[4938]: I1122 11:45:46.332217 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-zvzlz_85b362e8-25b1-4ed4-8c6c-8fdb1c84e296/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:46 crc kubenswrapper[4938]: I1122 11:45:46.392466 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-qqnx6_7b526fbb-fe31-4192-8756-67eaea9b813d/init/0.log" Nov 22 11:45:46 crc kubenswrapper[4938]: I1122 11:45:46.899624 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-98krt_073859e3-9fc9-45e3-a311-34411cea1556/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:46 crc kubenswrapper[4938]: I1122 11:45:46.900088 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-qqnx6_7b526fbb-fe31-4192-8756-67eaea9b813d/init/0.log" Nov 22 11:45:46 crc kubenswrapper[4938]: I1122 11:45:46.967085 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-78c64bc9c5-qqnx6_7b526fbb-fe31-4192-8756-67eaea9b813d/dnsmasq-dns/0.log" Nov 22 11:45:47 crc kubenswrapper[4938]: I1122 11:45:47.132252 4938 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_glance-default-external-api-0_daf50ab9-a17b-4d53-a2f5-a1f11ed8455e/glance-log/0.log" Nov 22 11:45:47 crc kubenswrapper[4938]: I1122 11:45:47.148260 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_daf50ab9-a17b-4d53-a2f5-a1f11ed8455e/glance-httpd/0.log" Nov 22 11:45:47 crc kubenswrapper[4938]: I1122 11:45:47.307390 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_8585da6c-5a29-4a79-9aa4-5385381dfd08/glance-httpd/0.log" Nov 22 11:45:47 crc kubenswrapper[4938]: I1122 11:45:47.339037 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_8585da6c-5a29-4a79-9aa4-5385381dfd08/glance-log/0.log" Nov 22 11:45:47 crc kubenswrapper[4938]: I1122 11:45:47.526077 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7bb4f8b4bd-qj489_52d01853-e609-4339-a336-78e1b9f4f704/horizon/0.log" Nov 22 11:45:47 crc kubenswrapper[4938]: I1122 11:45:47.633647 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-rthm2_71e3a6a6-d91c-416c-9ec6-43429dd10097/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:47 crc kubenswrapper[4938]: I1122 11:45:47.825413 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7bb4f8b4bd-qj489_52d01853-e609-4339-a336-78e1b9f4f704/horizon-log/0.log" Nov 22 11:45:47 crc kubenswrapper[4938]: I1122 11:45:47.840296 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-255m5_578cbfd4-2f90-4d71-ac4f-ccdb9f00629f/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:48 crc kubenswrapper[4938]: I1122 11:45:48.056884 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29396821-fw2vs_ad31137e-3dac-4a06-9b17-e54340147400/keystone-cron/0.log" Nov 22 11:45:48 crc kubenswrapper[4938]: I1122 11:45:48.071812 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6bf8df76c8-5c2xm_26b58116-00b3-49d5-bf76-d262754d9cfb/keystone-api/0.log" Nov 22 11:45:48 crc kubenswrapper[4938]: I1122 11:45:48.104685 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_190d6459-e173-4817-a60a-b204a9a4bf68/kube-state-metrics/0.log" Nov 22 11:45:48 crc kubenswrapper[4938]: I1122 11:45:48.316585 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-gcd75_6b52293d-9695-46ab-8248-af8bb1a3c464/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:48 crc kubenswrapper[4938]: I1122 11:45:48.614174 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7768cc7979-hrkwz_c4f7f822-da01-4216-9e4c-5ee8a9aa8495/neutron-httpd/0.log" Nov 22 11:45:48 crc kubenswrapper[4938]: I1122 11:45:48.629523 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-qssk9_2dc63cc5-838b-4bdf-86fe-46ede44788b3/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:48 crc kubenswrapper[4938]: I1122 11:45:48.632725 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7768cc7979-hrkwz_c4f7f822-da01-4216-9e4c-5ee8a9aa8495/neutron-api/0.log" Nov 22 11:45:49 crc kubenswrapper[4938]: I1122 11:45:49.165090 4938 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_nova-api-0_5b5cb627-ee85-42aa-95e9-ece522c218a4/nova-api-log/0.log" Nov 22 11:45:49 crc kubenswrapper[4938]: I1122 11:45:49.277747 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_59fde2f8-5c0c-44ab-a02b-87115af94dc0/nova-cell0-conductor-conductor/0.log" Nov 22 11:45:49 crc kubenswrapper[4938]: I1122 11:45:49.543713 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_6eb58a07-3ce8-43b4-a75f-a883df1d1e02/nova-cell1-conductor-conductor/0.log" Nov 22 11:45:49 crc kubenswrapper[4938]: I1122 11:45:49.645783 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5b5cb627-ee85-42aa-95e9-ece522c218a4/nova-api-api/0.log" Nov 22 11:45:49 crc kubenswrapper[4938]: I1122 11:45:49.650609 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_d4c78748-ba52-4ebe-b136-07c4f0d939df/nova-cell1-novncproxy-novncproxy/0.log" Nov 22 11:45:49 crc kubenswrapper[4938]: I1122 11:45:49.782296 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-sjbl5_6b2c795f-d47e-411a-a1c0-f59ed58d9506/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:49 crc kubenswrapper[4938]: I1122 11:45:49.940021 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_209e9603-14ba-4706-87a0-00ea7f2bd737/nova-metadata-log/0.log" Nov 22 11:45:50 crc kubenswrapper[4938]: I1122 11:45:50.259353 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0404ed3a-da0f-4ba3-953b-e1f3dca9d53b/mysql-bootstrap/0.log" Nov 22 11:45:50 crc kubenswrapper[4938]: I1122 11:45:50.321207 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_7fb1a3ef-83a7-467e-9cd0-94310d410729/nova-scheduler-scheduler/0.log" Nov 22 11:45:50 crc kubenswrapper[4938]: I1122 11:45:50.441836 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0404ed3a-da0f-4ba3-953b-e1f3dca9d53b/mysql-bootstrap/0.log" Nov 22 11:45:50 crc kubenswrapper[4938]: I1122 11:45:50.468375 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_0404ed3a-da0f-4ba3-953b-e1f3dca9d53b/galera/0.log" Nov 22 11:45:50 crc kubenswrapper[4938]: I1122 11:45:50.781275 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1e2caea9-4690-48c1-909a-05ba8dbf34d4/mysql-bootstrap/0.log" Nov 22 11:45:50 crc kubenswrapper[4938]: I1122 11:45:50.919609 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1e2caea9-4690-48c1-909a-05ba8dbf34d4/mysql-bootstrap/0.log" Nov 22 11:45:50 crc kubenswrapper[4938]: I1122 11:45:50.967253 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1e2caea9-4690-48c1-909a-05ba8dbf34d4/galera/0.log" Nov 22 11:45:51 crc kubenswrapper[4938]: I1122 11:45:51.163359 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_209e9603-14ba-4706-87a0-00ea7f2bd737/nova-metadata-metadata/0.log" Nov 22 11:45:51 crc kubenswrapper[4938]: I1122 11:45:51.171132 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_cc16df53-2254-4dc7-8914-88afcbc0b5c4/openstackclient/0.log" Nov 22 11:45:51 crc kubenswrapper[4938]: I1122 11:45:51.262204 4938 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_ovn-controller-metrics-48rkq_a3fc26cc-0b37-4f3b-a4ed-fd5817d30df7/openstack-network-exporter/0.log" Nov 22 11:45:51 crc kubenswrapper[4938]: I1122 11:45:51.376582 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dg79z_62cfd59e-5bd6-48ef-9990-1f29ec6d155a/ovsdb-server-init/0.log" Nov 22 11:45:51 crc kubenswrapper[4938]: I1122 11:45:51.506091 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dg79z_62cfd59e-5bd6-48ef-9990-1f29ec6d155a/ovsdb-server-init/0.log" Nov 22 11:45:51 crc kubenswrapper[4938]: I1122 11:45:51.565307 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dg79z_62cfd59e-5bd6-48ef-9990-1f29ec6d155a/ovsdb-server/0.log" Nov 22 11:45:51 crc kubenswrapper[4938]: I1122 11:45:51.575440 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dg79z_62cfd59e-5bd6-48ef-9990-1f29ec6d155a/ovs-vswitchd/0.log" Nov 22 11:45:51 crc kubenswrapper[4938]: I1122 11:45:51.697327 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-sq62w_a2ffb7f9-f83c-4e71-af53-3d116e260d8e/ovn-controller/0.log" Nov 22 11:45:51 crc kubenswrapper[4938]: I1122 11:45:51.836969 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-wn6rr_0c6fb3fe-7488-44ba-a5fc-d24f04f40dec/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:51 crc kubenswrapper[4938]: I1122 11:45:51.944939 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_05abbe70-d68c-4b0b-a6a3-580b764f3014/openstack-network-exporter/0.log" Nov 22 11:45:52 crc kubenswrapper[4938]: I1122 11:45:52.041535 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_05abbe70-d68c-4b0b-a6a3-580b764f3014/ovn-northd/0.log" Nov 22 11:45:52 crc kubenswrapper[4938]: I1122 11:45:52.131510 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_0e21a120-bf8c-43ad-b7fe-48e11f0a0545/openstack-network-exporter/0.log" Nov 22 11:45:52 crc kubenswrapper[4938]: I1122 11:45:52.170031 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_0e21a120-bf8c-43ad-b7fe-48e11f0a0545/ovsdbserver-nb/0.log" Nov 22 11:45:52 crc kubenswrapper[4938]: I1122 11:45:52.325859 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e7f61b47-1155-4776-acf0-0cb9ea53af1a/ovsdbserver-sb/0.log" Nov 22 11:45:52 crc kubenswrapper[4938]: I1122 11:45:52.338696 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e7f61b47-1155-4776-acf0-0cb9ea53af1a/openstack-network-exporter/0.log" Nov 22 11:45:52 crc kubenswrapper[4938]: I1122 11:45:52.578818 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5d64bdcd96-8b525_70cbbc4a-ed36-471c-9b65-5eea9fc87891/placement-api/0.log" Nov 22 11:45:52 crc kubenswrapper[4938]: I1122 11:45:52.643370 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5d64bdcd96-8b525_70cbbc4a-ed36-471c-9b65-5eea9fc87891/placement-log/0.log" Nov 22 11:45:52 crc kubenswrapper[4938]: I1122 11:45:52.703999 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_fc5cb0aa-c3a0-436c-b911-6029b94775a8/setup-container/0.log" Nov 22 11:45:52 crc kubenswrapper[4938]: I1122 11:45:52.893242 4938 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_fc5cb0aa-c3a0-436c-b911-6029b94775a8/rabbitmq/0.log" Nov 22 11:45:52 crc kubenswrapper[4938]: I1122 11:45:52.978432 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_fc5cb0aa-c3a0-436c-b911-6029b94775a8/setup-container/0.log" Nov 22 11:45:52 crc kubenswrapper[4938]: I1122 11:45:52.990783 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_30956ae1-6658-45ca-867e-12fb808394db/setup-container/0.log" Nov 22 11:45:53 crc kubenswrapper[4938]: I1122 11:45:53.128448 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_30956ae1-6658-45ca-867e-12fb808394db/setup-container/0.log" Nov 22 11:45:53 crc kubenswrapper[4938]: I1122 11:45:53.197117 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-sjz72_82ac5576-d4c3-4bb0-a2f3-2f6da7605821/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:53 crc kubenswrapper[4938]: I1122 11:45:53.204029 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_30956ae1-6658-45ca-867e-12fb808394db/rabbitmq/0.log" Nov 22 11:45:53 crc kubenswrapper[4938]: I1122 11:45:53.438719 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-mbmjk_ad415672-c278-4e60-b205-ff929432c200/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:53 crc kubenswrapper[4938]: I1122 11:45:53.454015 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-87l4v_ac9be71f-c722-4d3e-b43a-7dffaa096daf/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:53 crc kubenswrapper[4938]: I1122 11:45:53.646477 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-s94th_146c45e8-f683-48dd-99b4-02d5eeab729d/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:53 crc kubenswrapper[4938]: I1122 11:45:53.739102 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-9895f_a66a985f-423c-4438-b9cc-ad5cbc582077/ssh-known-hosts-edpm-deployment/0.log" Nov 22 11:45:53 crc kubenswrapper[4938]: I1122 11:45:53.937739 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-77b7545b4c-scc96_d1ea1eda-81bc-455d-9f0d-68324fbe5992/proxy-server/0.log" Nov 22 11:45:53 crc kubenswrapper[4938]: I1122 11:45:53.989641 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-77b7545b4c-scc96_d1ea1eda-81bc-455d-9f0d-68324fbe5992/proxy-httpd/0.log" Nov 22 11:45:54 crc kubenswrapper[4938]: I1122 11:45:54.060850 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-xn65g_48baac61-428d-4d1d-aa99-39c8ca12e251/swift-ring-rebalance/0.log" Nov 22 11:45:54 crc kubenswrapper[4938]: I1122 11:45:54.724682 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/account-reaper/0.log" Nov 22 11:45:54 crc kubenswrapper[4938]: I1122 11:45:54.742257 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/account-auditor/0.log" Nov 22 11:45:54 crc kubenswrapper[4938]: I1122 11:45:54.800510 4938 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/account-replicator/0.log" Nov 22 11:45:54 crc kubenswrapper[4938]: I1122 11:45:54.928983 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/account-server/0.log" Nov 22 11:45:54 crc kubenswrapper[4938]: I1122 11:45:54.948001 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/container-auditor/0.log" Nov 22 11:45:54 crc kubenswrapper[4938]: I1122 11:45:54.990338 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/container-replicator/0.log" Nov 22 11:45:55 crc kubenswrapper[4938]: I1122 11:45:55.126878 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/container-updater/0.log" Nov 22 11:45:55 crc kubenswrapper[4938]: I1122 11:45:55.127100 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/container-server/0.log" Nov 22 11:45:55 crc kubenswrapper[4938]: I1122 11:45:55.172223 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/object-auditor/0.log" Nov 22 11:45:55 crc kubenswrapper[4938]: I1122 11:45:55.218417 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/object-expirer/0.log" Nov 22 11:45:55 crc kubenswrapper[4938]: I1122 11:45:55.338868 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/object-replicator/0.log" Nov 22 11:45:55 crc kubenswrapper[4938]: I1122 11:45:55.366214 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/object-server/0.log" Nov 22 11:45:55 crc kubenswrapper[4938]: I1122 11:45:55.404668 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/object-updater/0.log" Nov 22 11:45:55 crc kubenswrapper[4938]: I1122 11:45:55.457979 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/rsync/0.log" Nov 22 11:45:55 crc kubenswrapper[4938]: I1122 11:45:55.602739 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_7a0f58e0-5202-4792-bd1a-64966c18450f/swift-recon-cron/0.log" Nov 22 11:45:55 crc kubenswrapper[4938]: I1122 11:45:55.661138 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-2bvhm_979e4133-a50f-45d0-9eb3-7f684d65c4ce/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:45:56 crc kubenswrapper[4938]: I1122 11:45:56.222965 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_aeda96c8-4659-41c2-947d-f10a1c61bee0/test-operator-logs-container/0.log" Nov 22 11:45:56 crc kubenswrapper[4938]: I1122 11:45:56.253051 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_7624b768-90d5-4bad-b97e-21ea6549679a/tempest-tests-tempest-tests-runner/0.log" Nov 22 11:45:56 crc kubenswrapper[4938]: I1122 11:45:56.418149 4938 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-46d5c_595a1412-b3d2-40ba-bb26-98cd27d79480/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:46:04 crc kubenswrapper[4938]: I1122 11:46:04.547893 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_7e219ab3-870c-4d69-99b6-79758b76a271/memcached/0.log" Nov 22 11:46:11 crc kubenswrapper[4938]: I1122 11:46:11.301304 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:46:11 crc kubenswrapper[4938]: I1122 11:46:11.302966 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:46:20 crc kubenswrapper[4938]: I1122 11:46:20.534958 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/util/0.log" Nov 22 11:46:20 crc kubenswrapper[4938]: I1122 11:46:20.696873 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/util/0.log" Nov 22 11:46:20 crc kubenswrapper[4938]: I1122 11:46:20.744684 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/pull/0.log" Nov 22 11:46:20 crc kubenswrapper[4938]: I1122 11:46:20.757101 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/pull/0.log" Nov 22 11:46:20 crc kubenswrapper[4938]: I1122 11:46:20.906619 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/pull/0.log" Nov 22 11:46:20 crc kubenswrapper[4938]: I1122 11:46:20.969698 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/util/0.log" Nov 22 11:46:20 crc kubenswrapper[4938]: I1122 11:46:20.979748 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1bf3d28a711035aae8e0af644764edd86da0d97631b5988225039dced6fc42c_04a11c92-d0c4-462d-ad17-c81256ce817c/extract/0.log" Nov 22 11:46:21 crc kubenswrapper[4938]: I1122 11:46:21.077022 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-mrpj9_dde3bedc-34b4-41e0-adba-78c802591de5/kube-rbac-proxy/0.log" Nov 22 11:46:21 crc kubenswrapper[4938]: I1122 11:46:21.134938 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-jhgfc_cba844df-58bc-4d1e-989c-9eb4ccb036b6/kube-rbac-proxy/0.log" Nov 22 11:46:21 crc kubenswrapper[4938]: I1122 
11:46:21.214217 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-mrpj9_dde3bedc-34b4-41e0-adba-78c802591de5/manager/0.log" Nov 22 11:46:21 crc kubenswrapper[4938]: I1122 11:46:21.323856 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-jhgfc_cba844df-58bc-4d1e-989c-9eb4ccb036b6/manager/0.log" Nov 22 11:46:21 crc kubenswrapper[4938]: I1122 11:46:21.358822 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-8b8mj_2415b98c-1a50-4f8d-b094-de51a90a0088/kube-rbac-proxy/0.log" Nov 22 11:46:21 crc kubenswrapper[4938]: I1122 11:46:21.398269 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-8b8mj_2415b98c-1a50-4f8d-b094-de51a90a0088/manager/0.log" Nov 22 11:46:21 crc kubenswrapper[4938]: I1122 11:46:21.564857 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-6f95d84fd6-s2d7b_a8e3c6f8-4a77-4180-a67b-3dab37169c07/kube-rbac-proxy/0.log" Nov 22 11:46:21 crc kubenswrapper[4938]: I1122 11:46:21.670390 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-6f95d84fd6-s2d7b_a8e3c6f8-4a77-4180-a67b-3dab37169c07/manager/0.log" Nov 22 11:46:21 crc kubenswrapper[4938]: I1122 11:46:21.721691 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-j8ftc_496d9a6a-3979-43af-aa47-9161506bc8e9/kube-rbac-proxy/0.log" Nov 22 11:46:21 crc kubenswrapper[4938]: I1122 11:46:21.773988 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-j8ftc_496d9a6a-3979-43af-aa47-9161506bc8e9/manager/0.log" Nov 22 11:46:21 crc kubenswrapper[4938]: I1122 11:46:21.861459 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-r9lgr_8659e26c-11d1-4a24-82e3-42e9737a54b8/kube-rbac-proxy/0.log" Nov 22 11:46:21 crc kubenswrapper[4938]: I1122 11:46:21.923230 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-r9lgr_8659e26c-11d1-4a24-82e3-42e9737a54b8/manager/0.log" Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.001028 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-z8ksz_9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95/kube-rbac-proxy/0.log" Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.155945 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-z8ksz_9cd95fb2-a07b-4e6d-b9ed-6796ee31ee95/manager/0.log" Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.172452 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-tb846_2a30b9b0-97ac-4268-8d85-193fa80c6b01/manager/0.log" Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.219616 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-tb846_2a30b9b0-97ac-4268-8d85-193fa80c6b01/kube-rbac-proxy/0.log" Nov 22 11:46:22 crc 
Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.346294 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-gxvrt_65118a1f-ed5e-4354-8494-4df42ff6ae6a/kube-rbac-proxy/0.log"
Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.377996 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-gxvrt_65118a1f-ed5e-4354-8494-4df42ff6ae6a/manager/0.log"
Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.505929 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-646fd589f9-gc4lw_fd298c00-9118-413b-bce4-1198393538fa/kube-rbac-proxy/0.log"
Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.509821 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-646fd589f9-gc4lw_fd298c00-9118-413b-bce4-1198393538fa/manager/0.log"
Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.578937 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-qq5ww_c933df24-871e-4075-b48f-f8903914716b/kube-rbac-proxy/0.log"
Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.685724 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-qq5ww_c933df24-871e-4075-b48f-f8903914716b/manager/0.log"
Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.757856 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-xjw2q_46844239-10fa-433c-bd82-565bf911989c/kube-rbac-proxy/0.log"
Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.794556 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-xjw2q_46844239-10fa-433c-bd82-565bf911989c/manager/0.log"
Nov 22 11:46:22 crc kubenswrapper[4938]: I1122 11:46:22.886251 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-dzpvv_ca599052-ab51-498f-882d-895854e272c4/kube-rbac-proxy/0.log"
Nov 22 11:46:23 crc kubenswrapper[4938]: I1122 11:46:23.011978 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-dzpvv_ca599052-ab51-498f-882d-895854e272c4/manager/0.log"
Nov 22 11:46:23 crc kubenswrapper[4938]: I1122 11:46:23.049862 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-qps9n_1016b115-4617-4a19-a992-91dd5b124c9b/kube-rbac-proxy/0.log"
Nov 22 11:46:23 crc kubenswrapper[4938]: I1122 11:46:23.105293 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-qps9n_1016b115-4617-4a19-a992-91dd5b124c9b/manager/0.log"
Nov 22 11:46:23 crc kubenswrapper[4938]: I1122 11:46:23.698178 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-2l2nm_7e8f66c5-67cb-428e-bc4d-9e6e893af682/kube-rbac-proxy/0.log"
Nov 22 11:46:23 crc kubenswrapper[4938]: I1122 11:46:23.703363 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-2l2nm_7e8f66c5-67cb-428e-bc4d-9e6e893af682/manager/0.log"
Nov 22 11:46:23 crc kubenswrapper[4938]: I1122 11:46:23.838886 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7f4bc68b84-bqqvf_7fc10dd9-2ded-4a21-badc-6e8bd9615dd1/kube-rbac-proxy/0.log"
Nov 22 11:46:23 crc kubenswrapper[4938]: I1122 11:46:23.973115 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6d45d44995-nrhm2_f4501035-7ea2-41f7-a3d4-12ab72d52a0c/kube-rbac-proxy/0.log"
Nov 22 11:46:24 crc kubenswrapper[4938]: I1122 11:46:24.218593 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-j4l4j_122aa2ac-8dc3-4698-818b-120126fb039b/registry-server/0.log"
Nov 22 11:46:24 crc kubenswrapper[4938]: I1122 11:46:24.244113 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6d45d44995-nrhm2_f4501035-7ea2-41f7-a3d4-12ab72d52a0c/operator/0.log"
Nov 22 11:46:24 crc kubenswrapper[4938]: I1122 11:46:24.389787 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-dx2bq_ee7a691c-6232-4e30-b1bf-400c65b8b127/kube-rbac-proxy/0.log"
Nov 22 11:46:24 crc kubenswrapper[4938]: I1122 11:46:24.501936 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-dx2bq_ee7a691c-6232-4e30-b1bf-400c65b8b127/manager/0.log"
Nov 22 11:46:24 crc kubenswrapper[4938]: I1122 11:46:24.538831 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-98nqr_454affdc-b63c-4696-914f-f2abbf7896ca/kube-rbac-proxy/0.log"
Nov 22 11:46:24 crc kubenswrapper[4938]: I1122 11:46:24.610427 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-98nqr_454affdc-b63c-4696-914f-f2abbf7896ca/manager/0.log"
Nov 22 11:46:24 crc kubenswrapper[4938]: I1122 11:46:24.805516 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7f4bc68b84-bqqvf_7fc10dd9-2ded-4a21-badc-6e8bd9615dd1/manager/0.log"
Nov 22 11:46:24 crc kubenswrapper[4938]: I1122 11:46:24.828843 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-5r8n9_6ceba7c3-c04c-4449-9788-ed341bdaceb7/kube-rbac-proxy/0.log"
Nov 22 11:46:24 crc kubenswrapper[4938]: I1122 11:46:24.836202 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-wmt6p_27df649b-2572-42d7-a137-6a82a01c482a/operator/0.log"
Nov 22 11:46:25 crc kubenswrapper[4938]: I1122 11:46:25.494350 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-5r8n9_6ceba7c3-c04c-4449-9788-ed341bdaceb7/manager/0.log"
Nov 22 11:46:25 crc kubenswrapper[4938]: I1122 11:46:25.494675 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-695797c565-v457z_dbade39a-90d4-49d8-96cc-0a5175783ac1/kube-rbac-proxy/0.log"
Nov 22 11:46:25 crc kubenswrapper[4938]: I1122 11:46:25.559496 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-695797c565-v457z_dbade39a-90d4-49d8-96cc-0a5175783ac1/manager/0.log"
Nov 22 11:46:25 crc kubenswrapper[4938]: I1122 11:46:25.671491 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-77db6bf9c-s5pww_584df814-c2c1-4566-a8d0-930b14020095/kube-rbac-proxy/0.log"
Nov 22 11:46:25 crc kubenswrapper[4938]: I1122 11:46:25.681604 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-77db6bf9c-s5pww_584df814-c2c1-4566-a8d0-930b14020095/manager/0.log"
Nov 22 11:46:25 crc kubenswrapper[4938]: I1122 11:46:25.739377 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-4rmnr_98d39c17-a9b0-483d-b170-eb006b5ee4b9/kube-rbac-proxy/0.log"
Nov 22 11:46:25 crc kubenswrapper[4938]: I1122 11:46:25.800151 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-4rmnr_98d39c17-a9b0-483d-b170-eb006b5ee4b9/manager/0.log"
Nov 22 11:46:41 crc kubenswrapper[4938]: I1122 11:46:41.256101 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-6xczv_fa995a8f-e92f-45fa-8926-73cea902f283/control-plane-machine-set-operator/0.log"
Nov 22 11:46:41 crc kubenswrapper[4938]: I1122 11:46:41.300986 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:46:41 crc kubenswrapper[4938]: I1122 11:46:41.301057 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 11:46:41 crc kubenswrapper[4938]: I1122 11:46:41.301110 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc"
Nov 22 11:46:41 crc kubenswrapper[4938]: I1122 11:46:41.301971 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f97ea1baec385526d652aa6b038b467ecdad6d69960d9e2d89bdb7d60d6d8e98"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 11:46:41 crc kubenswrapper[4938]: I1122 11:46:41.302045 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://f97ea1baec385526d652aa6b038b467ecdad6d69960d9e2d89bdb7d60d6d8e98" gracePeriod=600
Nov 22 11:46:41 crc kubenswrapper[4938]: I1122 11:46:41.458056 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-rqzh7_44da01b3-b33a-402b-9bc1-ceea816d801b/kube-rbac-proxy/0.log"
Nov 22 11:46:41 crc kubenswrapper[4938]: I1122 11:46:41.484355 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-rqzh7_44da01b3-b33a-402b-9bc1-ceea816d801b/machine-api-operator/0.log"
Nov 22 11:46:41 crc kubenswrapper[4938]: I1122 11:46:41.620697 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="f97ea1baec385526d652aa6b038b467ecdad6d69960d9e2d89bdb7d60d6d8e98" exitCode=0
Nov 22 11:46:41 crc kubenswrapper[4938]: I1122 11:46:41.620872 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"f97ea1baec385526d652aa6b038b467ecdad6d69960d9e2d89bdb7d60d6d8e98"}
Nov 22 11:46:41 crc kubenswrapper[4938]: I1122 11:46:41.621125 4938 scope.go:117] "RemoveContainer" containerID="373cfe1b3cd2221fd8ea760ce2646713c70c7fbcb17142ddad6c7ba90a4b68af"
Nov 22 11:46:42 crc kubenswrapper[4938]: I1122 11:46:42.631989 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerStarted","Data":"9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"}
Nov 22 11:46:54 crc kubenswrapper[4938]: I1122 11:46:54.266230 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-vzl7q_58946c83-00e0-4427-8232-d44f5f8f10e0/cert-manager-controller/0.log"
Nov 22 11:46:54 crc kubenswrapper[4938]: I1122 11:46:54.431013 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-25mlz_3faf7780-9f49-4f23-ac17-454fbeed3e79/cert-manager-cainjector/0.log"
Nov 22 11:46:54 crc kubenswrapper[4938]: I1122 11:46:54.478267 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-cj5bz_17da633d-3102-4583-ba74-5c67cfb859c6/cert-manager-webhook/0.log"
Nov 22 11:47:06 crc kubenswrapper[4938]: I1122 11:47:06.601859 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-msh52_6d740b57-09d5-44f9-90c2-bf2cfeb44311/nmstate-console-plugin/0.log"
Nov 22 11:47:07 crc kubenswrapper[4938]: I1122 11:47:07.411895 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-xfvp7_475ad90f-3054-4344-885d-9fe424557efd/kube-rbac-proxy/0.log"
Nov 22 11:47:07 crc kubenswrapper[4938]: I1122 11:47:07.429024 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-jhvqn_286573ca-1f3c-4af4-8bfb-ba8b52224082/nmstate-handler/0.log"
Nov 22 11:47:07 crc kubenswrapper[4938]: I1122 11:47:07.452585 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-xfvp7_475ad90f-3054-4344-885d-9fe424557efd/nmstate-metrics/0.log"
Nov 22 11:47:07 crc kubenswrapper[4938]: I1122 11:47:07.607380 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-g9mdw_f899d59c-f579-4865-985d-d87c4fc54922/nmstate-operator/0.log"
Nov 22 11:47:07 crc kubenswrapper[4938]: I1122 11:47:07.663117 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-jzqlj_32b53aaa-95c2-4834-a57d-955709a2e992/nmstate-webhook/0.log"
Nov 22 11:47:20 crc kubenswrapper[4938]: I1122 11:47:20.969169 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-b667v_a4e581ed-7db0-4270-9353-ab48412b2994/kube-rbac-proxy/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.074674 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-b667v_a4e581ed-7db0-4270-9353-ab48412b2994/controller/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.139519 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-frr-files/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.347335 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-reloader/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.348236 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-metrics/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.352175 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-frr-files/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.411828 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-reloader/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.551264 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-frr-files/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.589835 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-reloader/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.614170 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-metrics/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.615546 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-metrics/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.784423 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-frr-files/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.804272 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-metrics/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.804558 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/cp-reloader/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.839119 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/controller/0.log"
Nov 22 11:47:21 crc kubenswrapper[4938]: I1122 11:47:21.974947 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/kube-rbac-proxy/0.log"
Nov 22 11:47:22 crc kubenswrapper[4938]: I1122 11:47:22.002891 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/frr-metrics/0.log"
Nov 22 11:47:22 crc kubenswrapper[4938]: I1122 11:47:22.069661 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/kube-rbac-proxy-frr/0.log"
Nov 22 11:47:22 crc kubenswrapper[4938]: I1122 11:47:22.245540 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/reloader/0.log"
Nov 22 11:47:22 crc kubenswrapper[4938]: I1122 11:47:22.262283 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-gvwwc_01cf09eb-20ee-4493-9b69-49beca431020/frr-k8s-webhook-server/0.log"
Nov 22 11:47:22 crc kubenswrapper[4938]: I1122 11:47:22.467217 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-868865b9b5-bxgn8_0ff86a67-1bc4-4f45-82ae-cd10727037d6/manager/0.log"
Nov 22 11:47:22 crc kubenswrapper[4938]: I1122 11:47:22.624187 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-66d55db99c-wjdl4_2490230a-e04c-4569-8870-174b949c7ce6/webhook-server/0.log"
Nov 22 11:47:22 crc kubenswrapper[4938]: I1122 11:47:22.676633 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-5l59v_8fde61c9-db97-436d-8ee1-852084695193/kube-rbac-proxy/0.log"
Nov 22 11:47:23 crc kubenswrapper[4938]: I1122 11:47:23.218305 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-5l59v_8fde61c9-db97-436d-8ee1-852084695193/speaker/0.log"
Nov 22 11:47:23 crc kubenswrapper[4938]: I1122 11:47:23.382556 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9gsl5_9c76a23c-e78d-422c-90aa-7cb20ab288c6/frr/0.log"
Nov 22 11:47:36 crc kubenswrapper[4938]: I1122 11:47:36.538326 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/util/0.log"
Nov 22 11:47:36 crc kubenswrapper[4938]: I1122 11:47:36.675501 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/util/0.log"
Nov 22 11:47:36 crc kubenswrapper[4938]: I1122 11:47:36.676743 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/pull/0.log"
Nov 22 11:47:36 crc kubenswrapper[4938]: I1122 11:47:36.759612 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/pull/0.log"
Nov 22 11:47:36 crc kubenswrapper[4938]: I1122 11:47:36.927147 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/extract/0.log"
Nov 22 11:47:36 crc kubenswrapper[4938]: I1122 11:47:36.954893 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/pull/0.log"
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772epcm6j_645aab0f-dd96-4c5b-bb69-85d98c54bfe1/util/0.log" Nov 22 11:47:37 crc kubenswrapper[4938]: I1122 11:47:37.098534 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/extract-utilities/0.log" Nov 22 11:47:37 crc kubenswrapper[4938]: I1122 11:47:37.271308 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/extract-content/0.log" Nov 22 11:47:37 crc kubenswrapper[4938]: I1122 11:47:37.287225 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/extract-utilities/0.log" Nov 22 11:47:37 crc kubenswrapper[4938]: I1122 11:47:37.315556 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/extract-content/0.log" Nov 22 11:47:37 crc kubenswrapper[4938]: I1122 11:47:37.466775 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/extract-content/0.log" Nov 22 11:47:37 crc kubenswrapper[4938]: I1122 11:47:37.475628 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/extract-utilities/0.log" Nov 22 11:47:37 crc kubenswrapper[4938]: I1122 11:47:37.653036 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/extract-utilities/0.log" Nov 22 11:47:37 crc kubenswrapper[4938]: I1122 11:47:37.867183 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/extract-content/0.log" Nov 22 11:47:37 crc kubenswrapper[4938]: I1122 11:47:37.904661 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/extract-utilities/0.log" Nov 22 11:47:37 crc kubenswrapper[4938]: I1122 11:47:37.975714 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-n7p72_8e1c77e8-d7ac-457a-945e-1fffe49dc82e/registry-server/0.log" Nov 22 11:47:37 crc kubenswrapper[4938]: I1122 11:47:37.978401 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/extract-content/0.log" Nov 22 11:47:38 crc kubenswrapper[4938]: I1122 11:47:38.065425 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/extract-utilities/0.log" Nov 22 11:47:38 crc kubenswrapper[4938]: I1122 11:47:38.096857 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/extract-content/0.log" Nov 22 11:47:38 crc kubenswrapper[4938]: I1122 11:47:38.256125 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/util/0.log" Nov 22 11:47:38 crc 
kubenswrapper[4938]: I1122 11:47:38.758894 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/pull/0.log" Nov 22 11:47:38 crc kubenswrapper[4938]: I1122 11:47:38.771119 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/pull/0.log" Nov 22 11:47:38 crc kubenswrapper[4938]: I1122 11:47:38.803268 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/util/0.log" Nov 22 11:47:38 crc kubenswrapper[4938]: I1122 11:47:38.844808 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-qjkpw_6778dbfd-75df-4429-9a78-4ea6225eae52/registry-server/0.log" Nov 22 11:47:38 crc kubenswrapper[4938]: I1122 11:47:38.995895 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/extract/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.001441 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/util/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.027263 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6s6gjs_6884c2e1-43b6-4b1c-b4ed-7af23a04b5c7/pull/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.181939 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/extract-utilities/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.189366 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-hscj2_88051af2-c7b9-45b2-a1a5-2c1a025a271b/marketplace-operator/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.345324 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/extract-content/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.355880 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/extract-utilities/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.373890 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/extract-content/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.544257 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/extract-utilities/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.550860 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/extract-content/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.687812 4938 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-7nmjx_93c1a9c7-8810-4ee4-977f-c18fc37b10ec/registry-server/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.776209 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/extract-utilities/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.928860 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/extract-content/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.933809 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/extract-utilities/0.log" Nov 22 11:47:39 crc kubenswrapper[4938]: I1122 11:47:39.938779 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/extract-content/0.log" Nov 22 11:47:40 crc kubenswrapper[4938]: I1122 11:47:40.115319 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/extract-utilities/0.log" Nov 22 11:47:40 crc kubenswrapper[4938]: I1122 11:47:40.154967 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/extract-content/0.log" Nov 22 11:47:40 crc kubenswrapper[4938]: I1122 11:47:40.627844 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lxs6w_f4406526-10df-413c-87df-4aa065d6ecfb/registry-server/0.log" Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.342055 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b6flp"] Nov 22 11:48:01 crc kubenswrapper[4938]: E1122 11:48:01.343178 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72076617-9e8e-4ddf-8d6a-73086a29ed31" containerName="extract-content" Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.343220 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="72076617-9e8e-4ddf-8d6a-73086a29ed31" containerName="extract-content" Nov 22 11:48:01 crc kubenswrapper[4938]: E1122 11:48:01.343239 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72076617-9e8e-4ddf-8d6a-73086a29ed31" containerName="extract-utilities" Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.343250 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="72076617-9e8e-4ddf-8d6a-73086a29ed31" containerName="extract-utilities" Nov 22 11:48:01 crc kubenswrapper[4938]: E1122 11:48:01.343297 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72076617-9e8e-4ddf-8d6a-73086a29ed31" containerName="registry-server" Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.343303 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="72076617-9e8e-4ddf-8d6a-73086a29ed31" containerName="registry-server" Nov 22 11:48:01 crc kubenswrapper[4938]: E1122 11:48:01.343319 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52871c97-c1d3-4aac-af04-66333789a243" containerName="container-00" Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.343327 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="52871c97-c1d3-4aac-af04-66333789a243" containerName="container-00" Nov 22 11:48:01 crc 
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.343592 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="52871c97-c1d3-4aac-af04-66333789a243" containerName="container-00"
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.343626 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="72076617-9e8e-4ddf-8d6a-73086a29ed31" containerName="registry-server"
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.345439 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.366108 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b6flp"]
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.456427 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrsz9\" (UniqueName: \"kubernetes.io/projected/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-kube-api-access-vrsz9\") pod \"redhat-operators-b6flp\" (UID: \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\") " pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.456483 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-catalog-content\") pod \"redhat-operators-b6flp\" (UID: \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\") " pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.456602 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-utilities\") pod \"redhat-operators-b6flp\" (UID: \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\") " pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.557749 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-utilities\") pod \"redhat-operators-b6flp\" (UID: \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\") " pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.557894 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrsz9\" (UniqueName: \"kubernetes.io/projected/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-kube-api-access-vrsz9\") pod \"redhat-operators-b6flp\" (UID: \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\") " pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.557950 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-catalog-content\") pod \"redhat-operators-b6flp\" (UID: \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\") " pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.558295 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-utilities\") pod \"redhat-operators-b6flp\" (UID: \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\") " pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.558342 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-catalog-content\") pod \"redhat-operators-b6flp\" (UID: \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\") " pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.577939 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrsz9\" (UniqueName: \"kubernetes.io/projected/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-kube-api-access-vrsz9\") pod \"redhat-operators-b6flp\" (UID: \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\") " pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:01 crc kubenswrapper[4938]: I1122 11:48:01.670398 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:02 crc kubenswrapper[4938]: I1122 11:48:02.225444 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b6flp"]
Nov 22 11:48:02 crc kubenswrapper[4938]: I1122 11:48:02.300190 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b6flp" event={"ID":"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6","Type":"ContainerStarted","Data":"f2a58dcd142dcbc6012613992386ce7dbb18773ab401da002d98af8e72154cea"}
Nov 22 11:48:03 crc kubenswrapper[4938]: I1122 11:48:03.308623 4938 generic.go:334] "Generic (PLEG): container finished" podID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" containerID="575a3bb3e1a8496736861506a1bfa9b8ac501030d05a17d93da2f99b17ece3a9" exitCode=0
Nov 22 11:48:03 crc kubenswrapper[4938]: I1122 11:48:03.308737 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b6flp" event={"ID":"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6","Type":"ContainerDied","Data":"575a3bb3e1a8496736861506a1bfa9b8ac501030d05a17d93da2f99b17ece3a9"}
Nov 22 11:48:04 crc kubenswrapper[4938]: I1122 11:48:04.322538 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b6flp" event={"ID":"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6","Type":"ContainerStarted","Data":"c0e0a17cf16252cb53b6de894904786aa0542cbebf54e0a6ea8e425536db0c48"}
Nov 22 11:48:06 crc kubenswrapper[4938]: I1122 11:48:06.340561 4938 generic.go:334] "Generic (PLEG): container finished" podID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" containerID="c0e0a17cf16252cb53b6de894904786aa0542cbebf54e0a6ea8e425536db0c48" exitCode=0
Nov 22 11:48:06 crc kubenswrapper[4938]: I1122 11:48:06.340620 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b6flp" event={"ID":"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6","Type":"ContainerDied","Data":"c0e0a17cf16252cb53b6de894904786aa0542cbebf54e0a6ea8e425536db0c48"}
Nov 22 11:48:07 crc kubenswrapper[4938]: I1122 11:48:07.351731 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b6flp" event={"ID":"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6","Type":"ContainerStarted","Data":"d77c1d6725f6bd5b4ddfbe504c64dadc0db9f9e7bda937ba99cd24e04d3cf6f7"}
Nov 22 11:48:07 crc kubenswrapper[4938]: I1122 11:48:07.374221 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b6flp" podStartSLOduration=2.927581418 podStartE2EDuration="6.374189854s" podCreationTimestamp="2025-11-22 11:48:01 +0000 UTC" firstStartedPulling="2025-11-22 11:48:03.310651564 +0000 UTC m=+4215.778488963" lastFinishedPulling="2025-11-22 11:48:06.75726 +0000 UTC m=+4219.225097399" observedRunningTime="2025-11-22 11:48:07.369577259 +0000 UTC m=+4219.837414658" watchObservedRunningTime="2025-11-22 11:48:07.374189854 +0000 UTC m=+4219.842027263"
Nov 22 11:48:11 crc kubenswrapper[4938]: I1122 11:48:11.671569 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:11 crc kubenswrapper[4938]: I1122 11:48:11.672291 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:12 crc kubenswrapper[4938]: I1122 11:48:12.723556 4938 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b6flp" podUID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" containerName="registry-server" probeResult="failure" output=<
Nov 22 11:48:12 crc kubenswrapper[4938]: 	timeout: failed to connect service ":50051" within 1s
Nov 22 11:48:12 crc kubenswrapper[4938]: >
Nov 22 11:48:22 crc kubenswrapper[4938]: I1122 11:48:22.366194 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:22 crc kubenswrapper[4938]: I1122 11:48:22.444803 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b6flp"
Nov 22 11:48:22 crc kubenswrapper[4938]: I1122 11:48:22.606730 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b6flp"]
Nov 22 11:48:23 crc kubenswrapper[4938]: I1122 11:48:23.493399 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-b6flp" podUID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" containerName="registry-server" containerID="cri-o://d77c1d6725f6bd5b4ddfbe504c64dadc0db9f9e7bda937ba99cd24e04d3cf6f7" gracePeriod=2
Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.506258 4938 generic.go:334] "Generic (PLEG): container finished" podID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" containerID="d77c1d6725f6bd5b4ddfbe504c64dadc0db9f9e7bda937ba99cd24e04d3cf6f7" exitCode=0
Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.506364 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b6flp" event={"ID":"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6","Type":"ContainerDied","Data":"d77c1d6725f6bd5b4ddfbe504c64dadc0db9f9e7bda937ba99cd24e04d3cf6f7"}
Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.506587 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b6flp" event={"ID":"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6","Type":"ContainerDied","Data":"f2a58dcd142dcbc6012613992386ce7dbb18773ab401da002d98af8e72154cea"}
Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.506605 4938 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2a58dcd142dcbc6012613992386ce7dbb18773ab401da002d98af8e72154cea"
Need to start a new one" pod="openshift-marketplace/redhat-operators-b6flp" Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.594118 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-utilities\") pod \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\" (UID: \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\") " Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.594219 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-catalog-content\") pod \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\" (UID: \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\") " Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.594435 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrsz9\" (UniqueName: \"kubernetes.io/projected/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-kube-api-access-vrsz9\") pod \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\" (UID: \"bd3c370c-64b6-4d6d-a765-a5a84fda9fa6\") " Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.595094 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-utilities" (OuterVolumeSpecName: "utilities") pod "bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" (UID: "bd3c370c-64b6-4d6d-a765-a5a84fda9fa6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.595885 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.600044 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-kube-api-access-vrsz9" (OuterVolumeSpecName: "kube-api-access-vrsz9") pod "bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" (UID: "bd3c370c-64b6-4d6d-a765-a5a84fda9fa6"). InnerVolumeSpecName "kube-api-access-vrsz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.698064 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" (UID: "bd3c370c-64b6-4d6d-a765-a5a84fda9fa6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.698200 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:48:24 crc kubenswrapper[4938]: I1122 11:48:24.698240 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrsz9\" (UniqueName: \"kubernetes.io/projected/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6-kube-api-access-vrsz9\") on node \"crc\" DevicePath \"\"" Nov 22 11:48:25 crc kubenswrapper[4938]: I1122 11:48:25.519677 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b6flp" Nov 22 11:48:25 crc kubenswrapper[4938]: I1122 11:48:25.574113 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b6flp"] Nov 22 11:48:25 crc kubenswrapper[4938]: I1122 11:48:25.589434 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b6flp"] Nov 22 11:48:26 crc kubenswrapper[4938]: I1122 11:48:26.483986 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" path="/var/lib/kubelet/pods/bd3c370c-64b6-4d6d-a765-a5a84fda9fa6/volumes" Nov 22 11:48:41 crc kubenswrapper[4938]: I1122 11:48:41.300878 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:48:41 crc kubenswrapper[4938]: I1122 11:48:41.303302 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:49:11 crc kubenswrapper[4938]: I1122 11:49:11.301490 4938 patch_prober.go:28] interesting pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:49:11 crc kubenswrapper[4938]: I1122 11:49:11.302627 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:49:20 crc kubenswrapper[4938]: I1122 11:49:20.055674 4938 generic.go:334] "Generic (PLEG): container finished" podID="de2b429f-17b4-4115-9b1e-45d6a5c37446" containerID="ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450" exitCode=0 Nov 22 11:49:20 crc kubenswrapper[4938]: I1122 11:49:20.055769 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-jj4sz/must-gather-c6867" event={"ID":"de2b429f-17b4-4115-9b1e-45d6a5c37446","Type":"ContainerDied","Data":"ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450"} Nov 22 11:49:20 crc kubenswrapper[4938]: I1122 11:49:20.056696 4938 scope.go:117] "RemoveContainer" containerID="ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450" Nov 22 11:49:20 crc kubenswrapper[4938]: I1122 11:49:20.333200 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-jj4sz_must-gather-c6867_de2b429f-17b4-4115-9b1e-45d6a5c37446/gather/0.log" Nov 22 11:49:29 crc kubenswrapper[4938]: I1122 11:49:29.405070 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-jj4sz/must-gather-c6867"] Nov 22 11:49:29 crc kubenswrapper[4938]: I1122 11:49:29.405952 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-jj4sz/must-gather-c6867" podUID="de2b429f-17b4-4115-9b1e-45d6a5c37446" 
containerName="copy" containerID="cri-o://2f1e108b10fc9c703fa65c68877518baaed5a6bb07ffc89d5edbf27817383bae" gracePeriod=2 Nov 22 11:49:29 crc kubenswrapper[4938]: I1122 11:49:29.411423 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-jj4sz/must-gather-c6867"] Nov 22 11:49:29 crc kubenswrapper[4938]: I1122 11:49:29.816635 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-jj4sz_must-gather-c6867_de2b429f-17b4-4115-9b1e-45d6a5c37446/copy/0.log" Nov 22 11:49:29 crc kubenswrapper[4938]: I1122 11:49:29.817629 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-jj4sz/must-gather-c6867" Nov 22 11:49:29 crc kubenswrapper[4938]: I1122 11:49:29.950611 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s57kk\" (UniqueName: \"kubernetes.io/projected/de2b429f-17b4-4115-9b1e-45d6a5c37446-kube-api-access-s57kk\") pod \"de2b429f-17b4-4115-9b1e-45d6a5c37446\" (UID: \"de2b429f-17b4-4115-9b1e-45d6a5c37446\") " Nov 22 11:49:29 crc kubenswrapper[4938]: I1122 11:49:29.950651 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de2b429f-17b4-4115-9b1e-45d6a5c37446-must-gather-output\") pod \"de2b429f-17b4-4115-9b1e-45d6a5c37446\" (UID: \"de2b429f-17b4-4115-9b1e-45d6a5c37446\") " Nov 22 11:49:29 crc kubenswrapper[4938]: I1122 11:49:29.956146 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de2b429f-17b4-4115-9b1e-45d6a5c37446-kube-api-access-s57kk" (OuterVolumeSpecName: "kube-api-access-s57kk") pod "de2b429f-17b4-4115-9b1e-45d6a5c37446" (UID: "de2b429f-17b4-4115-9b1e-45d6a5c37446"). InnerVolumeSpecName "kube-api-access-s57kk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.052386 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s57kk\" (UniqueName: \"kubernetes.io/projected/de2b429f-17b4-4115-9b1e-45d6a5c37446-kube-api-access-s57kk\") on node \"crc\" DevicePath \"\"" Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.077137 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de2b429f-17b4-4115-9b1e-45d6a5c37446-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "de2b429f-17b4-4115-9b1e-45d6a5c37446" (UID: "de2b429f-17b4-4115-9b1e-45d6a5c37446"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.147517 4938 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-jj4sz_must-gather-c6867_de2b429f-17b4-4115-9b1e-45d6a5c37446/copy/0.log" Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.148060 4938 generic.go:334] "Generic (PLEG): container finished" podID="de2b429f-17b4-4115-9b1e-45d6a5c37446" containerID="2f1e108b10fc9c703fa65c68877518baaed5a6bb07ffc89d5edbf27817383bae" exitCode=143 Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.148106 4938 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-jj4sz/must-gather-c6867" Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.148123 4938 scope.go:117] "RemoveContainer" containerID="2f1e108b10fc9c703fa65c68877518baaed5a6bb07ffc89d5edbf27817383bae" Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.154786 4938 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de2b429f-17b4-4115-9b1e-45d6a5c37446-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.171203 4938 scope.go:117] "RemoveContainer" containerID="ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450" Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.232146 4938 scope.go:117] "RemoveContainer" containerID="2f1e108b10fc9c703fa65c68877518baaed5a6bb07ffc89d5edbf27817383bae" Nov 22 11:49:30 crc kubenswrapper[4938]: E1122 11:49:30.232618 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f1e108b10fc9c703fa65c68877518baaed5a6bb07ffc89d5edbf27817383bae\": container with ID starting with 2f1e108b10fc9c703fa65c68877518baaed5a6bb07ffc89d5edbf27817383bae not found: ID does not exist" containerID="2f1e108b10fc9c703fa65c68877518baaed5a6bb07ffc89d5edbf27817383bae" Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.232687 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f1e108b10fc9c703fa65c68877518baaed5a6bb07ffc89d5edbf27817383bae"} err="failed to get container status \"2f1e108b10fc9c703fa65c68877518baaed5a6bb07ffc89d5edbf27817383bae\": rpc error: code = NotFound desc = could not find container \"2f1e108b10fc9c703fa65c68877518baaed5a6bb07ffc89d5edbf27817383bae\": container with ID starting with 2f1e108b10fc9c703fa65c68877518baaed5a6bb07ffc89d5edbf27817383bae not found: ID does not exist" Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.232722 4938 scope.go:117] "RemoveContainer" containerID="ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450" Nov 22 11:49:30 crc kubenswrapper[4938]: E1122 11:49:30.233331 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450\": container with ID starting with ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450 not found: ID does not exist" containerID="ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450" Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.233613 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450"} err="failed to get container status \"ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450\": rpc error: code = NotFound desc = could not find container \"ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450\": container with ID starting with ac66d9e3118448f1a72223c7c6165881d95b079f6f76cad39561afcc37822450 not found: ID does not exist" Nov 22 11:49:30 crc kubenswrapper[4938]: I1122 11:49:30.460795 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de2b429f-17b4-4115-9b1e-45d6a5c37446" path="/var/lib/kubelet/pods/de2b429f-17b4-4115-9b1e-45d6a5c37446/volumes" Nov 22 11:49:41 crc kubenswrapper[4938]: I1122 11:49:41.300170 4938 patch_prober.go:28] interesting 
pod/machine-config-daemon-slzgc container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:49:41 crc kubenswrapper[4938]: I1122 11:49:41.300657 4938 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:49:41 crc kubenswrapper[4938]: I1122 11:49:41.300709 4938 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" Nov 22 11:49:41 crc kubenswrapper[4938]: I1122 11:49:41.301399 4938 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"} pod="openshift-machine-config-operator/machine-config-daemon-slzgc" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:49:41 crc kubenswrapper[4938]: I1122 11:49:41.301442 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" containerName="machine-config-daemon" containerID="cri-o://9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00" gracePeriod=600 Nov 22 11:49:41 crc kubenswrapper[4938]: E1122 11:49:41.420931 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:49:42 crc kubenswrapper[4938]: I1122 11:49:42.275269 4938 generic.go:334] "Generic (PLEG): container finished" podID="e2b98cee-eb10-409f-93b6-153856457611" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00" exitCode=0 Nov 22 11:49:42 crc kubenswrapper[4938]: I1122 11:49:42.275372 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" event={"ID":"e2b98cee-eb10-409f-93b6-153856457611","Type":"ContainerDied","Data":"9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"} Nov 22 11:49:42 crc kubenswrapper[4938]: I1122 11:49:42.275654 4938 scope.go:117] "RemoveContainer" containerID="f97ea1baec385526d652aa6b038b467ecdad6d69960d9e2d89bdb7d60d6d8e98" Nov 22 11:49:42 crc kubenswrapper[4938]: I1122 11:49:42.276332 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00" Nov 22 11:49:42 crc kubenswrapper[4938]: E1122 11:49:42.276651 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:49:55 crc kubenswrapper[4938]: I1122 11:49:55.447198 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00" Nov 22 11:49:55 crc kubenswrapper[4938]: E1122 11:49:55.447850 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:50:07 crc kubenswrapper[4938]: I1122 11:50:07.447331 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00" Nov 22 11:50:07 crc kubenswrapper[4938]: E1122 11:50:07.448094 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:50:20 crc kubenswrapper[4938]: I1122 11:50:20.447354 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00" Nov 22 11:50:20 crc kubenswrapper[4938]: E1122 11:50:20.448098 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.398560 4938 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-92rkg"] Nov 22 11:50:22 crc kubenswrapper[4938]: E1122 11:50:22.399412 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de2b429f-17b4-4115-9b1e-45d6a5c37446" containerName="copy" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.399429 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="de2b429f-17b4-4115-9b1e-45d6a5c37446" containerName="copy" Nov 22 11:50:22 crc kubenswrapper[4938]: E1122 11:50:22.399453 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" containerName="extract-utilities" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.399461 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" containerName="extract-utilities" Nov 22 11:50:22 crc kubenswrapper[4938]: E1122 11:50:22.399481 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" containerName="registry-server" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.399489 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" containerName="registry-server" Nov 22 11:50:22 crc kubenswrapper[4938]: E1122 11:50:22.399520 4938 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de2b429f-17b4-4115-9b1e-45d6a5c37446" containerName="gather" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.399532 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="de2b429f-17b4-4115-9b1e-45d6a5c37446" containerName="gather" Nov 22 11:50:22 crc kubenswrapper[4938]: E1122 11:50:22.399548 4938 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" containerName="extract-content" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.399555 4938 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" containerName="extract-content" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.399795 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="de2b429f-17b4-4115-9b1e-45d6a5c37446" containerName="copy" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.399815 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="de2b429f-17b4-4115-9b1e-45d6a5c37446" containerName="gather" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.399832 4938 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd3c370c-64b6-4d6d-a765-a5a84fda9fa6" containerName="registry-server" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.401543 4938 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-92rkg" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.426456 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-92rkg"] Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.517928 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdsg2\" (UniqueName: \"kubernetes.io/projected/7deed5a6-08da-4567-834f-a18dabf85e7a-kube-api-access-wdsg2\") pod \"certified-operators-92rkg\" (UID: \"7deed5a6-08da-4567-834f-a18dabf85e7a\") " pod="openshift-marketplace/certified-operators-92rkg" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.518069 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7deed5a6-08da-4567-834f-a18dabf85e7a-catalog-content\") pod \"certified-operators-92rkg\" (UID: \"7deed5a6-08da-4567-834f-a18dabf85e7a\") " pod="openshift-marketplace/certified-operators-92rkg" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.518194 4938 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7deed5a6-08da-4567-834f-a18dabf85e7a-utilities\") pod \"certified-operators-92rkg\" (UID: \"7deed5a6-08da-4567-834f-a18dabf85e7a\") " pod="openshift-marketplace/certified-operators-92rkg" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.619286 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdsg2\" (UniqueName: \"kubernetes.io/projected/7deed5a6-08da-4567-834f-a18dabf85e7a-kube-api-access-wdsg2\") pod \"certified-operators-92rkg\" (UID: \"7deed5a6-08da-4567-834f-a18dabf85e7a\") " pod="openshift-marketplace/certified-operators-92rkg" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.619388 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/7deed5a6-08da-4567-834f-a18dabf85e7a-catalog-content\") pod \"certified-operators-92rkg\" (UID: \"7deed5a6-08da-4567-834f-a18dabf85e7a\") " pod="openshift-marketplace/certified-operators-92rkg" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.619477 4938 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7deed5a6-08da-4567-834f-a18dabf85e7a-utilities\") pod \"certified-operators-92rkg\" (UID: \"7deed5a6-08da-4567-834f-a18dabf85e7a\") " pod="openshift-marketplace/certified-operators-92rkg" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.619949 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7deed5a6-08da-4567-834f-a18dabf85e7a-utilities\") pod \"certified-operators-92rkg\" (UID: \"7deed5a6-08da-4567-834f-a18dabf85e7a\") " pod="openshift-marketplace/certified-operators-92rkg" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.620153 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7deed5a6-08da-4567-834f-a18dabf85e7a-catalog-content\") pod \"certified-operators-92rkg\" (UID: \"7deed5a6-08da-4567-834f-a18dabf85e7a\") " pod="openshift-marketplace/certified-operators-92rkg" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.639823 4938 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdsg2\" (UniqueName: \"kubernetes.io/projected/7deed5a6-08da-4567-834f-a18dabf85e7a-kube-api-access-wdsg2\") pod \"certified-operators-92rkg\" (UID: \"7deed5a6-08da-4567-834f-a18dabf85e7a\") " pod="openshift-marketplace/certified-operators-92rkg" Nov 22 11:50:22 crc kubenswrapper[4938]: I1122 11:50:22.735134 4938 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-92rkg" Nov 22 11:50:23 crc kubenswrapper[4938]: I1122 11:50:23.235255 4938 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-92rkg"] Nov 22 11:50:23 crc kubenswrapper[4938]: I1122 11:50:23.659741 4938 generic.go:334] "Generic (PLEG): container finished" podID="7deed5a6-08da-4567-834f-a18dabf85e7a" containerID="1b38696508fd8e55df5826a6bf0db5f0676c1084933c1562c9423ddd51ab6250" exitCode=0 Nov 22 11:50:23 crc kubenswrapper[4938]: I1122 11:50:23.659787 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92rkg" event={"ID":"7deed5a6-08da-4567-834f-a18dabf85e7a","Type":"ContainerDied","Data":"1b38696508fd8e55df5826a6bf0db5f0676c1084933c1562c9423ddd51ab6250"} Nov 22 11:50:23 crc kubenswrapper[4938]: I1122 11:50:23.659833 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92rkg" event={"ID":"7deed5a6-08da-4567-834f-a18dabf85e7a","Type":"ContainerStarted","Data":"97ab6bf6564772d5cec971da9d0e4477a4bf4568a94ed8ee41282364e22ea87e"} Nov 22 11:50:23 crc kubenswrapper[4938]: I1122 11:50:23.662984 4938 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:50:24 crc kubenswrapper[4938]: I1122 11:50:24.671456 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92rkg" event={"ID":"7deed5a6-08da-4567-834f-a18dabf85e7a","Type":"ContainerStarted","Data":"2286d03d24aa3ccc11d5bf48fa628ef6ec08d4a0c63617facdbc0562fc2eb70a"} Nov 22 11:50:25 crc kubenswrapper[4938]: I1122 11:50:25.682174 4938 generic.go:334] "Generic (PLEG): container finished" podID="7deed5a6-08da-4567-834f-a18dabf85e7a" containerID="2286d03d24aa3ccc11d5bf48fa628ef6ec08d4a0c63617facdbc0562fc2eb70a" exitCode=0 Nov 22 11:50:25 crc kubenswrapper[4938]: I1122 11:50:25.682223 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92rkg" event={"ID":"7deed5a6-08da-4567-834f-a18dabf85e7a","Type":"ContainerDied","Data":"2286d03d24aa3ccc11d5bf48fa628ef6ec08d4a0c63617facdbc0562fc2eb70a"} Nov 22 11:50:27 crc kubenswrapper[4938]: I1122 11:50:27.704195 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92rkg" event={"ID":"7deed5a6-08da-4567-834f-a18dabf85e7a","Type":"ContainerStarted","Data":"9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a"} Nov 22 11:50:27 crc kubenswrapper[4938]: I1122 11:50:27.723372 4938 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-92rkg" podStartSLOduration=2.902714028 podStartE2EDuration="5.723354096s" podCreationTimestamp="2025-11-22 11:50:22 +0000 UTC" firstStartedPulling="2025-11-22 11:50:23.662554402 +0000 UTC m=+4356.130391811" lastFinishedPulling="2025-11-22 11:50:26.48319445 +0000 UTC m=+4358.951031879" observedRunningTime="2025-11-22 11:50:27.719865069 +0000 UTC m=+4360.187702478" watchObservedRunningTime="2025-11-22 11:50:27.723354096 +0000 UTC m=+4360.191191495" Nov 22 11:50:32 crc kubenswrapper[4938]: I1122 11:50:32.735983 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-92rkg" Nov 22 11:50:32 crc kubenswrapper[4938]: I1122 11:50:32.736509 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-92rkg" 
Nov 22 11:50:32 crc kubenswrapper[4938]: I1122 11:50:32.782785 4938 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-92rkg"
Nov 22 11:50:33 crc kubenswrapper[4938]: I1122 11:50:33.792746 4938 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-92rkg"
Nov 22 11:50:33 crc kubenswrapper[4938]: I1122 11:50:33.840345 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-92rkg"]
Nov 22 11:50:35 crc kubenswrapper[4938]: I1122 11:50:35.448418 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"
Nov 22 11:50:35 crc kubenswrapper[4938]: E1122 11:50:35.449399 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:50:35 crc kubenswrapper[4938]: I1122 11:50:35.770840 4938 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-92rkg" podUID="7deed5a6-08da-4567-834f-a18dabf85e7a" containerName="registry-server" containerID="cri-o://9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a" gracePeriod=2
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.266100 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-92rkg"
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.382295 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdsg2\" (UniqueName: \"kubernetes.io/projected/7deed5a6-08da-4567-834f-a18dabf85e7a-kube-api-access-wdsg2\") pod \"7deed5a6-08da-4567-834f-a18dabf85e7a\" (UID: \"7deed5a6-08da-4567-834f-a18dabf85e7a\") "
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.382484 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7deed5a6-08da-4567-834f-a18dabf85e7a-utilities\") pod \"7deed5a6-08da-4567-834f-a18dabf85e7a\" (UID: \"7deed5a6-08da-4567-834f-a18dabf85e7a\") "
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.382604 4938 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7deed5a6-08da-4567-834f-a18dabf85e7a-catalog-content\") pod \"7deed5a6-08da-4567-834f-a18dabf85e7a\" (UID: \"7deed5a6-08da-4567-834f-a18dabf85e7a\") "
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.383731 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7deed5a6-08da-4567-834f-a18dabf85e7a-utilities" (OuterVolumeSpecName: "utilities") pod "7deed5a6-08da-4567-834f-a18dabf85e7a" (UID: "7deed5a6-08da-4567-834f-a18dabf85e7a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.389095 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7deed5a6-08da-4567-834f-a18dabf85e7a-kube-api-access-wdsg2" (OuterVolumeSpecName: "kube-api-access-wdsg2") pod "7deed5a6-08da-4567-834f-a18dabf85e7a" (UID: "7deed5a6-08da-4567-834f-a18dabf85e7a"). InnerVolumeSpecName "kube-api-access-wdsg2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.436741 4938 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7deed5a6-08da-4567-834f-a18dabf85e7a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7deed5a6-08da-4567-834f-a18dabf85e7a" (UID: "7deed5a6-08da-4567-834f-a18dabf85e7a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.484884 4938 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7deed5a6-08da-4567-834f-a18dabf85e7a-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.484943 4938 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7deed5a6-08da-4567-834f-a18dabf85e7a-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.484964 4938 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdsg2\" (UniqueName: \"kubernetes.io/projected/7deed5a6-08da-4567-834f-a18dabf85e7a-kube-api-access-wdsg2\") on node \"crc\" DevicePath \"\""
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.780080 4938 generic.go:334] "Generic (PLEG): container finished" podID="7deed5a6-08da-4567-834f-a18dabf85e7a" containerID="9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a" exitCode=0
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.780120 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92rkg" event={"ID":"7deed5a6-08da-4567-834f-a18dabf85e7a","Type":"ContainerDied","Data":"9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a"}
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.780145 4938 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-92rkg" event={"ID":"7deed5a6-08da-4567-834f-a18dabf85e7a","Type":"ContainerDied","Data":"97ab6bf6564772d5cec971da9d0e4477a4bf4568a94ed8ee41282364e22ea87e"}
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.780162 4938 scope.go:117] "RemoveContainer" containerID="9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a"
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.780269 4938 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-92rkg"
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.802337 4938 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-92rkg"]
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.809429 4938 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-92rkg"]
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.811569 4938 scope.go:117] "RemoveContainer" containerID="2286d03d24aa3ccc11d5bf48fa628ef6ec08d4a0c63617facdbc0562fc2eb70a"
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.838457 4938 scope.go:117] "RemoveContainer" containerID="1b38696508fd8e55df5826a6bf0db5f0676c1084933c1562c9423ddd51ab6250"
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.883729 4938 scope.go:117] "RemoveContainer" containerID="9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a"
Nov 22 11:50:36 crc kubenswrapper[4938]: E1122 11:50:36.884354 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a\": container with ID starting with 9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a not found: ID does not exist" containerID="9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a"
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.884428 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a"} err="failed to get container status \"9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a\": rpc error: code = NotFound desc = could not find container \"9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a\": container with ID starting with 9024e4b7c190b6625c1b6f9faa8a921f6c7290d9432b3b00064efebfadd34e5a not found: ID does not exist"
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.884481 4938 scope.go:117] "RemoveContainer" containerID="2286d03d24aa3ccc11d5bf48fa628ef6ec08d4a0c63617facdbc0562fc2eb70a"
Nov 22 11:50:36 crc kubenswrapper[4938]: E1122 11:50:36.884943 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2286d03d24aa3ccc11d5bf48fa628ef6ec08d4a0c63617facdbc0562fc2eb70a\": container with ID starting with 2286d03d24aa3ccc11d5bf48fa628ef6ec08d4a0c63617facdbc0562fc2eb70a not found: ID does not exist" containerID="2286d03d24aa3ccc11d5bf48fa628ef6ec08d4a0c63617facdbc0562fc2eb70a"
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.884975 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2286d03d24aa3ccc11d5bf48fa628ef6ec08d4a0c63617facdbc0562fc2eb70a"} err="failed to get container status \"2286d03d24aa3ccc11d5bf48fa628ef6ec08d4a0c63617facdbc0562fc2eb70a\": rpc error: code = NotFound desc = could not find container \"2286d03d24aa3ccc11d5bf48fa628ef6ec08d4a0c63617facdbc0562fc2eb70a\": container with ID starting with 2286d03d24aa3ccc11d5bf48fa628ef6ec08d4a0c63617facdbc0562fc2eb70a not found: ID does not exist"
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.885019 4938 scope.go:117] "RemoveContainer" containerID="1b38696508fd8e55df5826a6bf0db5f0676c1084933c1562c9423ddd51ab6250"
Nov 22 11:50:36 crc kubenswrapper[4938]: E1122 11:50:36.885372 4938 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b38696508fd8e55df5826a6bf0db5f0676c1084933c1562c9423ddd51ab6250\": container with ID starting with 1b38696508fd8e55df5826a6bf0db5f0676c1084933c1562c9423ddd51ab6250 not found: ID does not exist" containerID="1b38696508fd8e55df5826a6bf0db5f0676c1084933c1562c9423ddd51ab6250"
Nov 22 11:50:36 crc kubenswrapper[4938]: I1122 11:50:36.885423 4938 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b38696508fd8e55df5826a6bf0db5f0676c1084933c1562c9423ddd51ab6250"} err="failed to get container status \"1b38696508fd8e55df5826a6bf0db5f0676c1084933c1562c9423ddd51ab6250\": rpc error: code = NotFound desc = could not find container \"1b38696508fd8e55df5826a6bf0db5f0676c1084933c1562c9423ddd51ab6250\": container with ID starting with 1b38696508fd8e55df5826a6bf0db5f0676c1084933c1562c9423ddd51ab6250 not found: ID does not exist"
Nov 22 11:50:38 crc kubenswrapper[4938]: I1122 11:50:38.459346 4938 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7deed5a6-08da-4567-834f-a18dabf85e7a" path="/var/lib/kubelet/pods/7deed5a6-08da-4567-834f-a18dabf85e7a/volumes"
Nov 22 11:50:46 crc kubenswrapper[4938]: I1122 11:50:46.448544 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"
Nov 22 11:50:46 crc kubenswrapper[4938]: E1122 11:50:46.450064 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:50:58 crc kubenswrapper[4938]: I1122 11:50:58.453520 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"
Nov 22 11:50:58 crc kubenswrapper[4938]: E1122 11:50:58.454488 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:51:12 crc kubenswrapper[4938]: I1122 11:51:12.447796 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"
Nov 22 11:51:12 crc kubenswrapper[4938]: E1122 11:51:12.448632 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:51:24 crc kubenswrapper[4938]: I1122 11:51:24.448153 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"
Nov 22 11:51:24 crc kubenswrapper[4938]: E1122 11:51:24.450417 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:51:27 crc kubenswrapper[4938]: I1122 11:51:27.260255 4938 scope.go:117] "RemoveContainer" containerID="43a445ccba1c73991711705391a856aec8cf1d2151378561d15ee8211578c5f4"
Nov 22 11:51:36 crc kubenswrapper[4938]: I1122 11:51:36.448473 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"
Nov 22 11:51:36 crc kubenswrapper[4938]: E1122 11:51:36.449232 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:51:47 crc kubenswrapper[4938]: I1122 11:51:47.448218 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"
Nov 22 11:51:47 crc kubenswrapper[4938]: E1122 11:51:47.450093 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:51:58 crc kubenswrapper[4938]: I1122 11:51:58.462837 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"
Nov 22 11:51:58 crc kubenswrapper[4938]: E1122 11:51:58.463854 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:52:10 crc kubenswrapper[4938]: I1122 11:52:10.447060 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"
Nov 22 11:52:10 crc kubenswrapper[4938]: E1122 11:52:10.447886 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
Nov 22 11:52:24 crc kubenswrapper[4938]: I1122 11:52:24.447787 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00"
Nov 22 11:52:24 crc kubenswrapper[4938]: E1122 11:52:24.449876 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611"
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:52:36 crc kubenswrapper[4938]: I1122 11:52:36.448712 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00" Nov 22 11:52:36 crc kubenswrapper[4938]: E1122 11:52:36.453045 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:52:48 crc kubenswrapper[4938]: I1122 11:52:48.457095 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00" Nov 22 11:52:48 crc kubenswrapper[4938]: E1122 11:52:48.458130 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:53:00 crc kubenswrapper[4938]: I1122 11:53:00.447664 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00" Nov 22 11:53:00 crc kubenswrapper[4938]: E1122 11:53:00.448571 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" Nov 22 11:53:13 crc kubenswrapper[4938]: I1122 11:53:13.447323 4938 scope.go:117] "RemoveContainer" containerID="9dedc77e769597214ffb103b0200768df456e94d4caf61e624fcb4c41b884f00" Nov 22 11:53:13 crc kubenswrapper[4938]: E1122 11:53:13.448296 4938 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-slzgc_openshift-machine-config-operator(e2b98cee-eb10-409f-93b6-153856457611)\"" pod="openshift-machine-config-operator/machine-config-daemon-slzgc" podUID="e2b98cee-eb10-409f-93b6-153856457611" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515110322060024433 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015110322061017351 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015110310644016500 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015110310645015451 5ustar corecore